2024-12-12 01:14:52,811 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad
2024-12-12 01:14:52,836 main DEBUG Took 0.021228 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-12 01:14:52,837 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-12 01:14:52,837 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-12 01:14:52,838 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-12 01:14:52,839 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,854 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-12 01:14:52,888 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,890 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,892 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,895 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,896 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,896 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,897 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,898 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,899 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,899 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,900 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,901 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,901 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,902 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,902 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,903 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,903 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,904 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,905 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,905 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,906 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,906 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,907 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,907 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 01:14:52,908 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,908 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-12 01:14:52,910 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 01:14:52,912 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-12 01:14:52,915 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-12 01:14:52,915 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-12 01:14:52,922 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-12 01:14:52,923 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-12 01:14:52,948 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-12 01:14:52,955 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-12 01:14:52,957 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-12 01:14:52,958 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-12 01:14:52,959 main DEBUG createAppenders(={Console})
2024-12-12 01:14:52,960 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized
2024-12-12 01:14:52,960 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad
2024-12-12 01:14:52,961 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK.
2024-12-12 01:14:52,961 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-12 01:14:52,962 main DEBUG OutputStream closed
2024-12-12 01:14:52,962 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-12 01:14:52,962 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-12 01:14:52,963 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK
2024-12-12 01:14:53,134 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-12 01:14:53,148 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-12 01:14:53,150 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-12 01:14:53,151 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-12 01:14:53,152 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-12 01:14:53,153 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-12 01:14:53,153 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-12 01:14:53,154 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-12 01:14:53,154 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-12 01:14:53,154 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-12 01:14:53,155 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-12 01:14:53,155 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-12 01:14:53,156 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-12 01:14:53,156 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-12 01:14:53,156 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-12 01:14:53,157 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-12 01:14:53,157 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-12 01:14:53,158 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-12 01:14:53,163 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-12 01:14:53,164 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null
2024-12-12 01:14:53,164 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-12 01:14:53,174 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK.
2024-12-12T01:14:53,220 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins
2024-12-12 01:14:53,226 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-12 01:14:53,226 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-12T01:14:53,817 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d
2024-12-12T01:14:53,818 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins
2024-12-12T01:14:53,873 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-12T01:14:54,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-12T01:14:54,289 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e, deleteOnExit=true
2024-12-12T01:14:54,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-12T01:14:54,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/test.cache.data in system properties and HBase conf
2024-12-12T01:14:54,304 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.tmp.dir in system properties and HBase conf
2024-12-12T01:14:54,304 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir in system properties and HBase conf
2024-12-12T01:14:54,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-12T01:14:54,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-12T01:14:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-12T01:14:54,422 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-12T01:14:54,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-12T01:14:54,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-12T01:14:54,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-12T01:14:54,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-12T01:14:54,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-12T01:14:54,431 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-12T01:14:54,432 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-12T01:14:54,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-12T01:14:54,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-12T01:14:54,434 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/nfs.dump.dir in system properties and HBase conf
2024-12-12T01:14:54,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/java.io.tmpdir in system properties and HBase conf
2024-12-12T01:14:54,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-12T01:14:54,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-12T01:14:54,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-12T01:14:55,609 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-12T01:14:55,801 INFO [Time-limited test {}] log.Log(170): Logging initialized @4348ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-12T01:14:55,938 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T01:14:56,047 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-12T01:14:56,089 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-12T01:14:56,089 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-12T01:14:56,091 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-12T01:14:56,109 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T01:14:56,112 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a1d5810{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,AVAILABLE}
2024-12-12T01:14:56,114 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62b16b1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-12T01:14:56,367 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@71d736e3{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/java.io.tmpdir/jetty-localhost-33705-hadoop-hdfs-3_4_1-tests_jar-_-any-13601440857837732616/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-12T01:14:56,378 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:33705}
2024-12-12T01:14:56,378 INFO [Time-limited test {}] server.Server(415): Started @4927ms
2024-12-12T01:14:56,935 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T01:14:56,944 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-12T01:14:56,946 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-12T01:14:56,946 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-12T01:14:56,946 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-12T01:14:56,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4107ad3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,AVAILABLE}
2024-12-12T01:14:56,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ae01b05{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-12T01:14:57,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@323a2aa2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/java.io.tmpdir/jetty-localhost-40107-hadoop-hdfs-3_4_1-tests_jar-_-any-17909032664563676405/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T01:14:57,110 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:40107}
2024-12-12T01:14:57,111 INFO [Time-limited test {}] server.Server(415): Started @5659ms
2024-12-12T01:14:57,202 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-12T01:14:57,626 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T01:14:57,633 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-12T01:14:57,676 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-12T01:14:57,676 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-12T01:14:57,676 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-12T01:14:57,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3188e696{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,AVAILABLE}
2024-12-12T01:14:57,681 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71ade74d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-12T01:14:57,849 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@486061d4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/java.io.tmpdir/jetty-localhost-42853-hadoop-hdfs-3_4_1-tests_jar-_-any-1975245874645263992/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T01:14:57,853 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:42853}
2024-12-12T01:14:57,854 INFO [Time-limited test {}] server.Server(415): Started @6403ms
2024-12-12T01:14:57,858 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-12T01:14:58,079 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T01:14:58,092 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-12T01:14:58,123 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1/current/BP-1163377979-172.17.0.2-1733966095287/current, will proceed with Du for space computation calculation,
2024-12-12T01:14:58,133 WARN [Thread-102 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2/current/BP-1163377979-172.17.0.2-1733966095287/current, will proceed with Du for space computation calculation,
2024-12-12T01:14:58,141 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-12T01:14:58,141 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-12T01:14:58,141 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-12T01:14:58,176 WARN [Thread-109 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3/current/BP-1163377979-172.17.0.2-1733966095287/current, will proceed with Du for space computation calculation,
2024-12-12T01:14:58,183 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@80b4649{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,AVAILABLE}
2024-12-12T01:14:58,184 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ef7cd2a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-12T01:14:58,227 WARN [Thread-110 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4/current/BP-1163377979-172.17.0.2-1733966095287/current, will proceed with Du for space computation calculation,
2024-12-12T01:14:58,281 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-12T01:14:58,306 WARN [Thread-83 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-12T01:14:58,364 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa9caab4394178965 with lease ID 0xe35966cbe47382ad: Processing first storage report for DS-ded111ef-8a50-4258-b66c-6d781d5302f4 from datanode DatanodeRegistration(127.0.0.1:34547, datanodeUuid=dba9e474-68ec-4a87-934b-bada2110a343, infoPort=42871, infoSecurePort=0, ipcPort=40851, storageInfo=lv=-57;cid=testClusterID;nsid=192597864;c=1733966095287)
2024-12-12T01:14:58,366 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa9caab4394178965 with lease ID 0xe35966cbe47382ad: from storage DS-ded111ef-8a50-4258-b66c-6d781d5302f4 node DatanodeRegistration(127.0.0.1:34547, datanodeUuid=dba9e474-68ec-4a87-934b-bada2110a343, infoPort=42871, infoSecurePort=0, ipcPort=40851, storageInfo=lv=-57;cid=testClusterID;nsid=192597864;c=1733966095287), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-12-12T01:14:58,367 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcd3392180dadad48 with lease ID 0xe35966cbe47382ac: Processing first storage report for DS-23c3192d-5425-4bff-b10a-e8d775ff9537 from datanode DatanodeRegistration(127.0.0.1:42945, datanodeUuid=0f665c89-e3d9-45cc-bcb2-2a3b8d2b6300, infoPort=34145, infoSecurePort=0, ipcPort=34129, storageInfo=lv=-57;cid=testClusterID;nsid=192597864;c=1733966095287)
2024-12-12T01:14:58,367 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd3392180dadad48 with lease ID 0xe35966cbe47382ac: from storage DS-23c3192d-5425-4bff-b10a-e8d775ff9537 node DatanodeRegistration(127.0.0.1:42945, datanodeUuid=0f665c89-e3d9-45cc-bcb2-2a3b8d2b6300, infoPort=34145, infoSecurePort=0, ipcPort=34129, storageInfo=lv=-57;cid=testClusterID;nsid=192597864;c=1733966095287), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-12T01:14:58,367 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa9caab4394178965 with lease ID 0xe35966cbe47382ad: Processing first storage report for DS-278210c2-80fc-47c2-a189-942acfe2343f from datanode DatanodeRegistration(127.0.0.1:34547, datanodeUuid=dba9e474-68ec-4a87-934b-bada2110a343, infoPort=42871, infoSecurePort=0, ipcPort=40851, storageInfo=lv=-57;cid=testClusterID;nsid=192597864;c=1733966095287)
2024-12-12T01:14:58,368 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa9caab4394178965 with lease ID 0xe35966cbe47382ad: from storage DS-278210c2-80fc-47c2-a189-942acfe2343f node DatanodeRegistration(127.0.0.1:34547, datanodeUuid=dba9e474-68ec-4a87-934b-bada2110a343, infoPort=42871, infoSecurePort=0, ipcPort=40851, storageInfo=lv=-57;cid=testClusterID;nsid=192597864;c=1733966095287), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-12T01:14:58,368 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcd3392180dadad48 with lease ID 0xe35966cbe47382ac: Processing first storage report for DS-c5688393-bf6a-4a0a-bb60-2ff816d43539 from datanode DatanodeRegistration(127.0.0.1:42945, datanodeUuid=0f665c89-e3d9-45cc-bcb2-2a3b8d2b6300, infoPort=34145, infoSecurePort=0, ipcPort=34129, storageInfo=lv=-57;cid=testClusterID;nsid=192597864;c=1733966095287)
2024-12-12T01:14:58,368 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd3392180dadad48 with lease ID 0xe35966cbe47382ac: from storage DS-c5688393-bf6a-4a0a-bb60-2ff816d43539 node DatanodeRegistration(127.0.0.1:42945, datanodeUuid=0f665c89-e3d9-45cc-bcb2-2a3b8d2b6300, infoPort=34145, infoSecurePort=0, ipcPort=34129, storageInfo=lv=-57;cid=testClusterID;nsid=192597864;c=1733966095287), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-12T01:14:58,381 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b930908{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/java.io.tmpdir/jetty-localhost-45835-hadoop-hdfs-3_4_1-tests_jar-_-any-10121392747596143467/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T01:14:58,383 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:45835}
2024-12-12T01:14:58,383 INFO [Time-limited test {}] server.Server(415): Started @6932ms
2024-12-12T01:14:58,387 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-12T01:14:58,577 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5/current/BP-1163377979-172.17.0.2-1733966095287/current, will proceed with Du for space computation calculation,
2024-12-12T01:14:58,596 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6/current/BP-1163377979-172.17.0.2-1733966095287/current, will proceed with Du for space computation calculation,
2024-12-12T01:14:58,688 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-12T01:14:58,697 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf23058556b5a1a91 with lease ID 0xe35966cbe47382ae: Processing first storage report for DS-31c8f016-3bfa-40b7-bd61-caab7f8132d0 from datanode DatanodeRegistration(127.0.0.1:39595, datanodeUuid=96a69992-28f1-447f-a6d6-ea4f454937b0, infoPort=33949, infoSecurePort=0, ipcPort=35865, storageInfo=lv=-57;cid=testClusterID;nsid=192597864;c=1733966095287)
2024-12-12T01:14:58,698 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf23058556b5a1a91 with lease ID 0xe35966cbe47382ae: from storage DS-31c8f016-3bfa-40b7-bd61-caab7f8132d0 node DatanodeRegistration(127.0.0.1:39595, datanodeUuid=96a69992-28f1-447f-a6d6-ea4f454937b0, infoPort=33949, infoSecurePort=0, ipcPort=35865, storageInfo=lv=-57;cid=testClusterID;nsid=192597864;c=1733966095287), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-12T01:14:58,698 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf23058556b5a1a91 with lease ID 0xe35966cbe47382ae: Processing first storage report for DS-46b1123e-7de8-44a3-887a-365dfa132402 from datanode DatanodeRegistration(127.0.0.1:39595, datanodeUuid=96a69992-28f1-447f-a6d6-ea4f454937b0, infoPort=33949, infoSecurePort=0, ipcPort=35865, storageInfo=lv=-57;cid=testClusterID;nsid=192597864;c=1733966095287)
2024-12-12T01:14:58,698 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf23058556b5a1a91 with lease ID 0xe35966cbe47382ae: from storage DS-46b1123e-7de8-44a3-887a-365dfa132402 node DatanodeRegistration(127.0.0.1:39595, datanodeUuid=96a69992-28f1-447f-a6d6-ea4f454937b0, infoPort=33949, infoSecurePort=0, ipcPort=35865, storageInfo=lv=-57;cid=testClusterID;nsid=192597864;c=1733966095287), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-12T01:14:58,912 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d
2024-12-12T01:14:59,020 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/zookeeper_0, clientPort=54997, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-12T01:14:59,041 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54997
2024-12-12T01:14:59,067 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T01:14:59,072 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T01:14:59,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741825_1001 (size=7)
2024-12-12T01:14:59,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741825_1001 (size=7)
2024-12-12T01:14:59,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741825_1001 (size=7)
2024-12-12T01:14:59,881 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 with version=8
2024-12-12T01:14:59,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/hbase-staging
2024-12-12T01:15:00,031 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-12T01:15:00,409 INFO [Time-limited test {}] client.ConnectionUtils(128): master/36236b0d7605:0 server-side Connection retries=45
2024-12-12T01:15:00,425 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T01:15:00,426 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-12T01:15:00,431 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-12T01:15:00,432 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T01:15:00,432 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-12T01:15:00,627 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-12T01:15:00,717 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-12T01:15:00,729 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-12T01:15:00,734 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-12T01:15:00,769 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 12103 (auto-detected)
2024-12-12T01:15:00,770 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-12T01:15:00,798 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36083
2024-12-12T01:15:00,834 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36083 connecting to ZooKeeper ensemble=127.0.0.1:54997
2024-12-12T01:15:00,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:360830x0, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-12T01:15:00,913 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36083-0x10083caab1a0000 connected
2024-12-12T01:15:01,027 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T01:15:01,033 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T01:15:01,053 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-12T01:15:01,057 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221, hbase.cluster.distributed=false
2024-12-12T01:15:01,108 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-12T01:15:01,116 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36083
2024-12-12T01:15:01,118 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36083
2024-12-12T01:15:01,121 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36083
2024-12-12T01:15:01,128 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36083
2024-12-12T01:15:01,128 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36083
2024-12-12T01:15:01,292 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/36236b0d7605:0 server-side Connection retries=45
2024-12-12T01:15:01,295 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T01:15:01,296 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-12T01:15:01,296 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-12T01:15:01,296 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T01:15:01,296 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-12T01:15:01,299 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-12T01:15:01,303 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-12T01:15:01,304 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38295
2024-12-12T01:15:01,306 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38295 connecting to ZooKeeper ensemble=127.0.0.1:54997
2024-12-12T01:15:01,308 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T01:15:01,313 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T01:15:01,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:382950x0, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-12T01:15:01,328 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:382950x0, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-12T01:15:01,337 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-12T01:15:01,340 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38295-0x10083caab1a0001 connected
2024-12-12T01:15:01,349 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-12T01:15:01,352 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-12T01:15:01,365 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-12T01:15:01,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38295
2024-12-12T01:15:01,369 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38295
2024-12-12T01:15:01,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38295
2024-12-12T01:15:01,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38295
2024-12-12T01:15:01,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38295
2024-12-12T01:15:01,409 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/36236b0d7605:0 server-side Connection retries=45
2024-12-12T01:15:01,409 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T01:15:01,410 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-12T01:15:01,410 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-12T01:15:01,411 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T01:15:01,411 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-12T01:15:01,411 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-12T01:15:01,412 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-12T01:15:01,413 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44743
2024-12-12T01:15:01,415 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44743 connecting to ZooKeeper ensemble=127.0.0.1:54997
2024-12-12T01:15:01,417 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T01:15:01,420 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T01:15:01,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:447430x0, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-12T01:15:01,429 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44743-0x10083caab1a0002 connected
2024-12-12T01:15:01,431 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-12T01:15:01,432 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-12T01:15:01,437 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-12T01:15:01,438 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-12T01:15:01,441 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-12T01:15:01,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44743
2024-12-12T01:15:01,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44743
2024-12-12T01:15:01,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44743
2024-12-12T01:15:01,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44743
2024-12-12T01:15:01,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44743
2024-12-12T01:15:01,480 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/36236b0d7605:0 server-side Connection retries=45
2024-12-12T01:15:01,480 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T01:15:01,480 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-12T01:15:01,480 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-12T01:15:01,481 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T01:15:01,481 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-12T01:15:01,481 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-12T01:15:01,481 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-12T01:15:01,483 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33883
2024-12-12T01:15:01,485 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33883 connecting to ZooKeeper ensemble=127.0.0.1:54997
2024-12-12T01:15:01,487 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T01:15:01,489 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T01:15:01,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338830x0, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-12T01:15:01,497 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:338830x0, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-12T01:15:01,498 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-12T01:15:01,498 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33883-0x10083caab1a0003 connected
2024-12-12T01:15:01,504 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-12T01:15:01,505 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-12T01:15:01,507 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-12T01:15:01,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33883
2024-12-12T01:15:01,512 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33883
2024-12-12T01:15:01,514 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33883
2024-12-12T01:15:01,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33883
2024-12-12T01:15:01,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33883
2024-12-12T01:15:01,535 DEBUG [M:0;36236b0d7605:36083 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;36236b0d7605:36083
2024-12-12T01:15:01,540 INFO [master/36236b0d7605:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/36236b0d7605,36083,1733966100114
2024-12-12T01:15:01,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T01:15:01,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T01:15:01,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T01:15:01,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T01:15:01,556 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/36236b0d7605,36083,1733966100114
2024-12-12T01:15:01,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-12T01:15:01,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-12T01:15:01,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-12T01:15:01,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-12T01:15:01,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-12T01:15:01,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-12T01:15:01,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-12T01:15:01,597 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-12T01:15:01,599 INFO [master/36236b0d7605:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/36236b0d7605,36083,1733966100114 from backup master directory
2024-12-12T01:15:01,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/36236b0d7605,36083,1733966100114
2024-12-12T01:15:01,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T01:15:01,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T01:15:01,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T01:15:01,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T01:15:01,604 WARN [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts
(Longer MTTR!) 2024-12-12T01:15:01,605 INFO [master/36236b0d7605:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=36236b0d7605,36083,1733966100114 2024-12-12T01:15:01,608 INFO [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-12T01:15:01,609 INFO [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-12T01:15:01,688 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/hbase.id] with ID: c7aab131-a6d9-4901-b39f-42031992260b 2024-12-12T01:15:01,688 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.tmp/hbase.id 2024-12-12T01:15:01,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741826_1002 (size=42) 2024-12-12T01:15:01,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741826_1002 (size=42) 2024-12-12T01:15:01,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741826_1002 (size=42) 2024-12-12T01:15:01,729 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.tmp/hbase.id]:[hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/hbase.id] 2024-12-12T01:15:01,805 INFO [master/36236b0d7605:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T01:15:01,811 INFO [master/36236b0d7605:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-12T01:15:01,837 INFO [master/36236b0d7605:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 24ms. 
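The ipc.RpcExecutor lines earlier in this section (default.FPBQ.Fifo, priority.RWQ.Fifo.read/write, replication.FPBQ.Fifo, metaPriority.FPBQ.Fifo on ports 44743 and 33883) show each region server sizing its RPC scheduler and call queues, and hfile.BlockCacheFactory shows the block cache being allocated. These sizes normally come from site configuration; the sketch below lists keys that commonly drive them, with illustrative values only, since the test's actual settings are not visible in this log.

    // Sketch only: configuration keys behind the "Instantiated ... FPBQ.Fifo" /
    // "Started handlerCount=..." and "Allocating BlockCache" lines above.
    // Values are illustrative assumptions, not the ones this test used.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcSchedulerSizingSketch {
      public static Configuration rpcSizing() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.handler.count", 3);                // handlers for the default executor
        conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.1f);  // call queues per handler
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);      // read vs. write queue split (RWQ executors)
        conf.setFloat("hfile.block.cache.size", 0.4f);                     // heap fraction for the BlockCache
        return conf;
      }
    }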
2024-12-12T01:15:01,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:01,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:01,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:01,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:01,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741827_1003 (size=196) 2024-12-12T01:15:01,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741827_1003 (size=196) 2024-12-12T01:15:01,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741827_1003 (size=196) 2024-12-12T01:15:01,898 INFO [master/36236b0d7605:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:15:01,900 INFO [master/36236b0d7605:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-12T01:15:01,918 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
  at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
  at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at java.lang.Class.forName0(Native Method) ~[?:?]
  at java.lang.Class.forName(Class.java:375) ~[?:?]
  at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
  at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
  at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
  at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
  at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
  at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
  at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
  at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
  at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
  at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
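This NoSuchMethodException is logged at DEBUG and is expected: the SASL helper probes DFSClient by reflection for decryptEncryptedDataEncryptionKey and, when the method is absent, concludes it is running against a Hadoop release that already contains HDFS-12396. The sketch below shows the generic "probe by reflection, treat NoSuchMethodException as a signal" pattern implied by the trace; it is an illustration of the pattern, not the HBase implementation itself.

    // Minimal sketch of the reflection-probe pattern suggested by the DEBUG trace above.
    public class ReflectionProbeSketch {
      // Returns true when 'target' declares the named method. Callers pick the legacy
      // code path when it exists and the newer path when NoSuchMethodException fires,
      // which is why the exception above is benign.
      static boolean declares(Class<?> target, String name, Class<?>... parameterTypes) {
        try {
          target.getDeclaredMethod(name, parameterTypes);
          return true;  // method present: use it via reflection
        } catch (NoSuchMethodException e) {
          return false; // expected on releases that already ship the replacement API
        }
      }
    }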
2024-12-12T01:15:01,923 INFO [master/36236b0d7605:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T01:15:01,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741828_1004 (size=1189) 2024-12-12T01:15:01,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741828_1004 (size=1189) 2024-12-12T01:15:01,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741828_1004 (size=1189) 2024-12-12T01:15:01,996 INFO [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/data/master/store 2024-12-12T01:15:02,054 WARN [IPC Server handler 3 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-12T01:15:02,055 WARN [IPC Server handler 3 on default port 33353 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-12T01:15:02,055 WARN [IPC Server handler 3 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 
(unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-12T01:15:02,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741829_1005 (size=34) 2024-12-12T01:15:02,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741829_1005 (size=34) 2024-12-12T01:15:02,114 INFO [master/36236b0d7605:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-12T01:15:02,119 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:02,122 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T01:15:02,122 INFO [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T01:15:02,122 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T01:15:02,124 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-12T01:15:02,125 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T01:15:02,126 INFO [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
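The repeated "Failed to place enough replicas, still in need of 1 to reach 3" warnings from BlockPlacementPolicyDefault typically mean the NameNode could not find enough eligible DISK targets at that moment (for example because a datanode is temporarily excluded or its storage is reported unavailable); HDFS leaves the block under-replicated and the RedundancyMonitor retries later, so in a mini-cluster test this is usually transient noise. Where the noise matters, one common test-side mitigation (an assumption here, not something this log shows the test doing) is to keep dfs.replication no larger than the number of datanodes:

    // Sketch only: capping HDFS replication for a small test cluster so the
    // block-placement warnings above cannot fire. Illustrative, not this test's config.
    import org.apache.hadoop.conf.Configuration;

    public class TestHdfsReplicationSketch {
      public static Configuration forDataNodes(int dataNodes) {
        Configuration conf = new Configuration();
        conf.setInt("dfs.replication", Math.min(3, dataNodes)); // e.g. 1 for a single-datanode mini DFS
        return conf;
      }
    }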
2024-12-12T01:15:02,131 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733966102122Disabling compacts and flushes for region at 1733966102122Disabling writes for close at 1733966102125 (+3 ms)Writing region close event to WAL at 1733966102125Closed at 1733966102125 2024-12-12T01:15:02,142 WARN [master/36236b0d7605:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/data/master/store/.initializing 2024-12-12T01:15:02,143 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/WALs/36236b0d7605,36083,1733966100114 2024-12-12T01:15:02,164 INFO [master/36236b0d7605:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T01:15:02,187 INFO [master/36236b0d7605:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=36236b0d7605%2C36083%2C1733966100114, suffix=, logDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/WALs/36236b0d7605,36083,1733966100114, archiveDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/oldWALs, maxLogs=10 2024-12-12T01:15:02,217 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/WALs/36236b0d7605,36083,1733966100114/36236b0d7605%2C36083%2C1733966100114.1733966102193, exclude list is [], retry=0 2024-12-12T01:15:02,221 WARN [IPC Server handler 0 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-12T01:15:02,222 WARN [IPC Server handler 0 on default port 33353 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-12T01:15:02,222 WARN [IPC Server handler 0 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-12T01:15:02,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:42945,DS-23c3192d-5425-4bff-b10a-e8d775ff9537,DISK] 2024-12-12T01:15:02,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34547,DS-ded111ef-8a50-4258-b66c-6d781d5302f4,DISK] 2024-12-12T01:15:02,243 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-12T01:15:02,309 INFO [master/36236b0d7605:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/WALs/36236b0d7605,36083,1733966100114/36236b0d7605%2C36083%2C1733966100114.1733966102193 2024-12-12T01:15:02,310 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34145:34145),(127.0.0.1/127.0.0.1:42871:42871)] 2024-12-12T01:15:02,311 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-12T01:15:02,312 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:02,320 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T01:15:02,321 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T01:15:02,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T01:15:02,424 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-12T01:15:02,429 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:02,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T01:15:02,436 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T01:15:02,443 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-12T01:15:02,443 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:02,445 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:15:02,445 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T01:15:02,449 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-12T01:15:02,449 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:02,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:15:02,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-12T01:15:02,455 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-12T01:15:02,456 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:02,457 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:15:02,457 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T01:15:02,462 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T01:15:02,464 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-12T01:15:02,473 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T01:15:02,474 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T01:15:02,480 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
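Each compactions.CompactionConfiguration line above dumps the effective compaction tuning for one column family of master:store (minCompactSize 128 MB, files [3,10), ratio 1.2, off-peak ratio 5.0, major period 604800000 ms). The sketch below maps those numbers to their configuration keys; the values shown mirror the defaults printed in the log and are listed for orientation, not as a record of explicit settings in this test.

    // Sketch: keys behind the CompactionConfiguration dump above (values = printed defaults).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration defaults() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period, ms
        return conf;
      }
    }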
2024-12-12T01:15:02,494 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-12T01:15:02,505 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:15:02,507 INFO [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69524272, jitterRate=0.03599238395690918}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T01:15:02,516 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733966102342Initializing all the Stores at 1733966102345 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733966102346 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966102347 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966102347Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966102347Cleaning up temporary data from old regions at 1733966102474 (+127 ms)Region opened successfully at 1733966102516 (+42 ms) 2024-12-12T01:15:02,524 INFO [master/36236b0d7605:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-12T01:15:02,574 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2514f2de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=36236b0d7605/172.17.0.2:0 2024-12-12T01:15:02,624 INFO [master/36236b0d7605:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
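The "Opened 1595e783b53d99cd5eef43b6debb2682" entry also prints the region's split policy (SteppingSplitPolicy over IncreasingToUpperBoundRegionSplitPolicy/ConstantSizeRegionSplitPolicy with a jittered desiredMaxFileSize) and its flush policy (FlushLargeStoresPolicy, flushSizeLowerBound=33554432). These choices are normally driven by table- or site-level configuration; the keys below are the usual levers, with illustrative values rather than the ones in effect here.

    // Sketch: configuration keys that commonly determine the split/flush behaviour
    // summarized in the "Opened ..." line above. Illustrative values only.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionPolicySketch {
      public static Configuration policies() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.regionserver.region.split.policy",
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
        conf.setLong("hbase.hregion.max.filesize", 64L * 1024 * 1024);         // base size before jitter
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // memstore flush threshold
        return conf;
      }
    }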
2024-12-12T01:15:02,643 INFO [master/36236b0d7605:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-12T01:15:02,644 INFO [master/36236b0d7605:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-12T01:15:02,649 INFO [master/36236b0d7605:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-12T01:15:02,650 INFO [master/36236b0d7605:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-12T01:15:02,658 INFO [master/36236b0d7605:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-12-12T01:15:02,659 INFO [master/36236b0d7605:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-12T01:15:02,698 INFO [master/36236b0d7605:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-12T01:15:02,709 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-12T01:15:02,711 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-12T01:15:02,714 INFO [master/36236b0d7605:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-12T01:15:02,715 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-12T01:15:02,719 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-12T01:15:02,722 INFO [master/36236b0d7605:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-12T01:15:02,730 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-12T01:15:02,732 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-12T01:15:02,734 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-12T01:15:02,736 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-12T01:15:02,755 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-12T01:15:02,761 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-12T01:15:02,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T01:15:02,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T01:15:02,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T01:15:02,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:02,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:02,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:02,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T01:15:02,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:02,769 INFO [master/36236b0d7605:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=36236b0d7605,36083,1733966100114, sessionid=0x10083caab1a0000, setting cluster-up flag (Was=false) 2024-12-12T01:15:02,782 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-12T01:15:02,782 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-12T01:15:02,782 WARN 
[RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-12T01:15:02,782 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-12T01:15:02,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:02,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:02,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:02,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:02,793 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-12T01:15:02,795 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=36236b0d7605,36083,1733966100114 2024-12-12T01:15:02,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:02,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:02,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:02,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:02,811 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-12T01:15:02,815 
DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=36236b0d7605,36083,1733966100114 2024-12-12T01:15:02,824 INFO [master/36236b0d7605:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-12T01:15:02,881 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-12T01:15:02,887 INFO [master/36236b0d7605:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:15:02,888 INFO [master/36236b0d7605:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-12-12T01:15:02,922 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(746): ClusterId : c7aab131-a6d9-4901-b39f-42031992260b 2024-12-12T01:15:02,922 INFO [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(746): ClusterId : c7aab131-a6d9-4901-b39f-42031992260b 2024-12-12T01:15:02,923 INFO [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(746): ClusterId : c7aab131-a6d9-4901-b39f-42031992260b 2024-12-12T01:15:02,926 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T01:15:02,926 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T01:15:02,926 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T01:15:02,934 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T01:15:02,934 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T01:15:02,935 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T01:15:02,935 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T01:15:02,942 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T01:15:02,942 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T01:15:02,942 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T01:15:02,943 DEBUG [RS:0;36236b0d7605:38295 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a2179d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=36236b0d7605/172.17.0.2:0 2024-12-12T01:15:02,946 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T01:15:02,947 DEBUG [RS:1;36236b0d7605:44743 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12432b0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=36236b0d7605/172.17.0.2:0 2024-12-12T01:15:02,950 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T01:15:02,950 DEBUG [RS:2;36236b0d7605:33883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2edadf60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=36236b0d7605/172.17.0.2:0 2024-12-12T01:15:02,953 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-12T01:15:02,965 DEBUG [RS:0;36236b0d7605:38295 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;36236b0d7605:38295 2024-12-12T01:15:02,965 INFO [master/36236b0d7605:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-12T01:15:02,967 DEBUG [RS:1;36236b0d7605:44743 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;36236b0d7605:44743 2024-12-12T01:15:02,969 INFO [RS:0;36236b0d7605:38295 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-12T01:15:02,969 INFO [RS:1;36236b0d7605:44743 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-12T01:15:02,969 INFO [RS:0;36236b0d7605:38295 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-12T01:15:02,969 INFO [RS:1;36236b0d7605:44743 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-12T01:15:02,970 DEBUG [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-12T01:15:02,970 DEBUG [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-12T01:15:02,970 INFO [RS:0;36236b0d7605:38295 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:15:02,970 INFO [RS:1;36236b0d7605:44743 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:15:02,970 DEBUG [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-12T01:15:02,970 DEBUG [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(832): About to register with Master. 
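Both the master ("Registered master coprocessor service: service=AccessControlService", with AccessController and SecureTestUtil$MasterSyncObserver loaded) and all three region servers ("Registered regionserver coprocessor executorService: executorService=AccessControlService") come up with the AccessController system coprocessor installed, which is what an ACL-oriented test needs. System coprocessors of this kind are normally wired in through configuration; the sketch below shows the usual keys as an assumed illustration, since the test's own setup code is not part of this log.

    // Sketch only: typical wiring for AccessController as a system coprocessor on
    // master, region server and regions. Assumed for illustration.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControllerWiringSketch {
      public static Configuration secured() {
        Configuration conf = HBaseConfiguration.create();
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        conf.setBoolean("hbase.security.authorization", true);
        return conf;
      }
    }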
2024-12-12T01:15:02,971 DEBUG [RS:2;36236b0d7605:33883 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;36236b0d7605:33883 2024-12-12T01:15:02,971 INFO [RS:2;36236b0d7605:33883 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-12T01:15:02,971 INFO [RS:2;36236b0d7605:33883 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-12T01:15:02,971 DEBUG [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-12T01:15:02,972 INFO [RS:2;36236b0d7605:33883 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:15:02,972 DEBUG [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-12T01:15:02,973 INFO [master/36236b0d7605:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-12T01:15:02,974 INFO [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(2659): reportForDuty to master=36236b0d7605,36083,1733966100114 with port=38295, startcode=1733966101221 2024-12-12T01:15:02,974 INFO [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(2659): reportForDuty to master=36236b0d7605,36083,1733966100114 with port=33883, startcode=1733966101479 2024-12-12T01:15:02,974 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(2659): reportForDuty to master=36236b0d7605,36083,1733966100114 with port=44743, startcode=1733966101408 2024-12-12T01:15:02,980 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 36236b0d7605,36083,1733966100114 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-12T01:15:02,988 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/36236b0d7605:0, corePoolSize=5, maxPoolSize=5 2024-12-12T01:15:02,988 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/36236b0d7605:0, corePoolSize=5, maxPoolSize=5 2024-12-12T01:15:02,989 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/36236b0d7605:0, corePoolSize=5, maxPoolSize=5 2024-12-12T01:15:02,989 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/36236b0d7605:0, corePoolSize=5, maxPoolSize=5 2024-12-12T01:15:02,989 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] 
executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/36236b0d7605:0, corePoolSize=10, maxPoolSize=10 2024-12-12T01:15:02,989 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:02,989 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/36236b0d7605:0, corePoolSize=2, maxPoolSize=2 2024-12-12T01:15:02,989 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:02,989 DEBUG [RS:0;36236b0d7605:38295 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T01:15:02,989 DEBUG [RS:2;36236b0d7605:33883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T01:15:02,989 DEBUG [RS:1;36236b0d7605:44743 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T01:15:02,996 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-12T01:15:02,996 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-12T01:15:03,004 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:03,005 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T01:15:03,013 INFO [master/36236b0d7605:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, 
hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733966133013 2024-12-12T01:15:03,014 WARN [IPC Server handler 2 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-12T01:15:03,014 WARN [IPC Server handler 2 on default port 33353 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-12T01:15:03,015 WARN [IPC Server handler 2 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-12T01:15:03,015 INFO [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-12T01:15:03,017 INFO [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-12T01:15:03,022 INFO [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-12T01:15:03,022 INFO [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-12T01:15:03,023 INFO [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-12T01:15:03,023 INFO [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-12T01:15:03,028 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
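The "Failed to place enough replicas" warnings are the HDFS NameNode failing to find a third DISK storage for a new block, most likely because only two DataNodes (127.0.0.1:42945 and 127.0.0.1:34547 appear later in this log) are serving a replication target of 3. The warning itself suggests raising the placement-policy loggers to DEBUG; a hedged sketch of doing that through the Log4j 2 API already on this test's classpath (in a real deployment the same logger names would normally go into the log4j2 configuration file instead):

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class EnablePlacementDebug {
  public static void main(String[] args) {
    // Both logger names are taken verbatim from the warning above.
    Configurator.setLevel(
        "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
    Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
  }
}
```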
2024-12-12T01:15:03,039 INFO [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-12T01:15:03,041 INFO [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-12T01:15:03,042 INFO [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-12T01:15:03,065 INFO [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-12T01:15:03,066 INFO [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-12T01:15:03,066 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44885, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T01:15:03,067 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51155, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T01:15:03,067 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52879, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T01:15:03,074 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-12T01:15:03,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741831_1007 (size=1321) 2024-12-12T01:15:03,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741831_1007 (size=1321) 2024-12-12T01:15:03,082 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-12T01:15:03,083 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:15:03,083 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-12T01:15:03,085 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-12T01:15:03,085 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/36236b0d7605:0:becomeActiveMaster-HFileCleaner.large.0-1733966103068,5,FailOnTimeoutGroup] 2024-12-12T01:15:03,087 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/36236b0d7605:0:becomeActiveMaster-HFileCleaner.small.0-1733966103086,5,FailOnTimeoutGroup] 2024-12-12T01:15:03,087 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,087 INFO [master/36236b0d7605:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-12T01:15:03,089 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,090 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
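The master also notes that "Reopening regions with very high storeFileRefCount is disabled" unless hbase.regions.recovery.store.file.ref.count is set above 0. The key name comes straight from the log line; the threshold value in this hedged sketch is only illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountRecovery {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Any value > 0 enables the chore that reopens regions whose store files are
    // pinned by at least this many readers (3 is an arbitrary example threshold).
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
  }
}
```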
2024-12-12T01:15:03,096 WARN [IPC Server handler 0 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-12T01:15:03,096 WARN [IPC Server handler 0 on default port 33353 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-12T01:15:03,097 WARN [IPC Server handler 0 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-12T01:15:03,112 DEBUG [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-12T01:15:03,112 WARN [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-12T01:15:03,113 DEBUG [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-12T01:15:03,113 WARN [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-12T01:15:03,114 DEBUG [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-12T01:15:03,115 WARN [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
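The "reportForDuty failed; sleeping 100 ms and then retrying" lines show each RegionServer backing off until the master finishes initializing and stops throwing ServerNotRunningYetException. Purely as an illustrative sketch of that retry-until-ready pattern (this is not HBase's actual HRegionServer code):

```java
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

public final class RetryUntilReady {
  /** Retry a call with a fixed sleep, the way each RegionServer retries reportForDuty above. */
  static <T> T retry(Callable<T> call, long sleepMs, int maxAttempts) throws Exception {
    Exception last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return call.call();
      } catch (Exception e) {   // e.g. ServerNotRunningYetException while the master warms up
        last = e;
        TimeUnit.MILLISECONDS.sleep(sleepMs);
      }
    }
    if (last == null) {
      last = new IllegalStateException("maxAttempts must be >= 1");
    }
    throw last;
  }

  public static void main(String[] args) throws Exception {
    // The 100 ms sleep mirrors the interval in the log lines above.
    System.out.println(retry(() -> "registered", 100, 3));
  }
}
```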
2024-12-12T01:15:03,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741832_1008 (size=32) 2024-12-12T01:15:03,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741832_1008 (size=32) 2024-12-12T01:15:03,127 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:03,134 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T01:15:03,139 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T01:15:03,139 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:03,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T01:15:03,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-12T01:15:03,144 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-12T01:15:03,145 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:03,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T01:15:03,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T01:15:03,149 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T01:15:03,149 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:03,150 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T01:15:03,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T01:15:03,155 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T01:15:03,156 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:03,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T01:15:03,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-12T01:15:03,160 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740 2024-12-12T01:15:03,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740 2024-12-12T01:15:03,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-12T01:15:03,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-12T01:15:03,166 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-12T01:15:03,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-12T01:15:03,175 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:15:03,176 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74987320, jitterRate=0.11739814281463623}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T01:15:03,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733966103127Initializing all the Stores at 1733966103130 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733966103130Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733966103132 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966103132Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733966103132Cleaning up temporary data from old regions at 1733966103164 (+32 ms)Region opened successfully at 1733966103190 (+26 ms) 2024-12-12T01:15:03,190 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-12T01:15:03,190 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-12-12T01:15:03,191 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-12T01:15:03,191 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T01:15:03,191 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T01:15:03,197 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-12T01:15:03,197 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733966103190Disabling compacts and flushes for region at 1733966103190Disabling writes for close at 1733966103191 (+1 ms)Writing region close event to WAL at 1733966103196 (+5 ms)Closed at 1733966103197 (+1 ms) 2024-12-12T01:15:03,203 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-12T01:15:03,203 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-12T01:15:03,214 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-12T01:15:03,214 INFO [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(2659): reportForDuty to master=36236b0d7605,36083,1733966100114 with port=33883, startcode=1733966101479 2024-12-12T01:15:03,216 INFO [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(2659): reportForDuty to master=36236b0d7605,36083,1733966100114 with port=38295, startcode=1733966101221 2024-12-12T01:15:03,216 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 36236b0d7605,33883,1733966101479 2024-12-12T01:15:03,220 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] master.ServerManager(517): Registering regionserver=36236b0d7605,33883,1733966101479 2024-12-12T01:15:03,221 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(2659): reportForDuty to master=36236b0d7605,36083,1733966100114 with port=44743, startcode=1733966101408 2024-12-12T01:15:03,229 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-12T01:15:03,234 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-12T01:15:03,239 DEBUG [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:15:03,239 DEBUG [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33353 2024-12-12T01:15:03,239 DEBUG [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-12T01:15:03,244 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 36236b0d7605,38295,1733966101221 2024-12-12T01:15:03,244 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] master.ServerManager(517): Registering regionserver=36236b0d7605,38295,1733966101221 2024-12-12T01:15:03,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T01:15:03,247 DEBUG [RS:2;36236b0d7605:33883 {}] zookeeper.ZKUtil(111): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/36236b0d7605,33883,1733966101479 2024-12-12T01:15:03,247 WARN [RS:2;36236b0d7605:33883 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T01:15:03,247 INFO [RS:2;36236b0d7605:33883 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T01:15:03,248 DEBUG [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,33883,1733966101479 2024-12-12T01:15:03,248 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 36236b0d7605,44743,1733966101408 2024-12-12T01:15:03,248 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] master.ServerManager(517): Registering regionserver=36236b0d7605,44743,1733966101408 2024-12-12T01:15:03,248 DEBUG [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:15:03,248 DEBUG [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33353 2024-12-12T01:15:03,248 DEBUG [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-12T01:15:03,253 DEBUG [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:15:03,253 DEBUG [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33353 2024-12-12T01:15:03,253 DEBUG [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-12T01:15:03,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T01:15:03,258 DEBUG [RS:0;36236b0d7605:38295 {}] zookeeper.ZKUtil(111): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/36236b0d7605,38295,1733966101221 2024-12-12T01:15:03,258 WARN [RS:0;36236b0d7605:38295 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
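The hbase:meta descriptor and the four column families instantiated above (info, ns, rep_barrier, table) are created internally by InitMetaProcedure, but the same per-family attributes can be expressed through the public client API. A hedged sketch using the HBase 2.x+ builder classes; the table name "demo" and the single 'info' family are illustrative only:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateDemoTable {
  public static void main(String[] args) throws Exception {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
            .setBlocksize(8192)                                    // BLOCKSIZE => '8192'
            .build())
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(td);
    }
  }
}
```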
2024-12-12T01:15:03,259 INFO [RS:0;36236b0d7605:38295 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T01:15:03,259 DEBUG [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,38295,1733966101221 2024-12-12T01:15:03,263 DEBUG [RS:1;36236b0d7605:44743 {}] zookeeper.ZKUtil(111): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/36236b0d7605,44743,1733966101408 2024-12-12T01:15:03,263 WARN [RS:1;36236b0d7605:44743 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-12T01:15:03,263 INFO [RS:1;36236b0d7605:44743 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T01:15:03,263 DEBUG [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,44743,1733966101408 2024-12-12T01:15:03,279 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [36236b0d7605,33883,1733966101479] 2024-12-12T01:15:03,280 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [36236b0d7605,38295,1733966101221] 2024-12-12T01:15:03,282 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [36236b0d7605,44743,1733966101408] 2024-12-12T01:15:03,315 INFO [RS:0;36236b0d7605:38295 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T01:15:03,324 INFO [RS:1;36236b0d7605:44743 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T01:15:03,324 INFO [RS:2;36236b0d7605:33883 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T01:15:03,354 INFO [RS:0;36236b0d7605:38295 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T01:15:03,355 INFO [RS:1;36236b0d7605:44743 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T01:15:03,360 INFO [RS:2;36236b0d7605:33883 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T01:15:03,366 INFO [RS:2;36236b0d7605:33883 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T01:15:03,367 INFO [RS:0;36236b0d7605:38295 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T01:15:03,367 INFO [RS:2;36236b0d7605:33883 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
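The MemStoreFlusher line shows a global memstore limit of 880 M with a low-water mark of 836 M, i.e. 95% of the limit; the limit itself is a fraction of the RegionServer heap. A hedged sketch of the two keys that usually control this (0.4 and 0.95 are the common defaults and are consistent with the numbers logged above):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreSizing {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the RegionServer heap shared by all memstores (880 M in this run).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Flushing is forced once usage passes this fraction of the limit (836 M = 95% of 880 M).
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
  }
}
```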
2024-12-12T01:15:03,367 INFO [RS:0;36236b0d7605:38295 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,367 INFO [RS:1;36236b0d7605:44743 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T01:15:03,367 INFO [RS:1;36236b0d7605:44743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,369 INFO [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-12T01:15:03,377 INFO [RS:2;36236b0d7605:33883 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-12T01:15:03,379 INFO [RS:2;36236b0d7605:33883 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,380 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,380 INFO [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-12T01:15:03,381 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,381 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,381 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,381 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,382 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/36236b0d7605:0, corePoolSize=2, maxPoolSize=2 2024-12-12T01:15:03,382 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,382 INFO [RS:0;36236b0d7605:38295 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-12T01:15:03,383 INFO [RS:0;36236b0d7605:38295 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
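The per-family CompactionConfiguration entries earlier (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0) and the PressureAwareCompactionThroughputController bounds here are all configuration-driven. A hedged sketch of the selection-policy keys, with names assumed from the standard hbase.hstore.compaction.* settings (the throughput bounds have their own keys, not shown):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
  }
}
```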
2024-12-12T01:15:03,383 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,383 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,383 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,383 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,384 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,384 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/36236b0d7605:0, corePoolSize=2, maxPoolSize=2 2024-12-12T01:15:03,384 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,384 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-12T01:15:03,384 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,384 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,385 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,385 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,385 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,385 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0, corePoolSize=3, maxPoolSize=3 2024-12-12T01:15:03,385 DEBUG [RS:0;36236b0d7605:38295 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/36236b0d7605:0, corePoolSize=3, maxPoolSize=3 2024-12-12T01:15:03,386 INFO [RS:1;36236b0d7605:44743 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-12T01:15:03,386 INFO [RS:1;36236b0d7605:44743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-12T01:15:03,386 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,386 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,387 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,387 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,387 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,387 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,387 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,387 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,387 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,387 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,387 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/36236b0d7605:0, corePoolSize=2, maxPoolSize=2 2024-12-12T01:15:03,387 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0, corePoolSize=3, maxPoolSize=3 2024-12-12T01:15:03,388 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,388 DEBUG [RS:2;36236b0d7605:33883 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/36236b0d7605:0, corePoolSize=3, maxPoolSize=3 2024-12-12T01:15:03,388 WARN [36236b0d7605:36083 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-12T01:15:03,388 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,389 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,389 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,389 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,389 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/36236b0d7605:0, corePoolSize=1, maxPoolSize=1 2024-12-12T01:15:03,389 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0, corePoolSize=3, maxPoolSize=3 2024-12-12T01:15:03,389 DEBUG [RS:1;36236b0d7605:44743 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/36236b0d7605:0, corePoolSize=3, maxPoolSize=3 2024-12-12T01:15:03,413 INFO [RS:0;36236b0d7605:38295 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,413 INFO [RS:0;36236b0d7605:38295 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,414 INFO [RS:0;36236b0d7605:38295 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,414 INFO [RS:0;36236b0d7605:38295 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,414 INFO [RS:0;36236b0d7605:38295 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,414 INFO [RS:0;36236b0d7605:38295 {}] hbase.ChoreService(168): Chore ScheduledChore name=36236b0d7605,38295,1733966101221-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T01:15:03,415 INFO [RS:1;36236b0d7605:44743 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,415 INFO [RS:1;36236b0d7605:44743 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,416 INFO [RS:1;36236b0d7605:44743 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,416 INFO [RS:1;36236b0d7605:44743 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,416 INFO [RS:1;36236b0d7605:44743 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-12-12T01:15:03,416 INFO [RS:1;36236b0d7605:44743 {}] hbase.ChoreService(168): Chore ScheduledChore name=36236b0d7605,44743,1733966101408-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T01:15:03,442 INFO [RS:2;36236b0d7605:33883 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,442 INFO [RS:2;36236b0d7605:33883 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,442 INFO [RS:2;36236b0d7605:33883 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,442 INFO [RS:2;36236b0d7605:33883 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,442 INFO [RS:2;36236b0d7605:33883 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,442 INFO [RS:2;36236b0d7605:33883 {}] hbase.ChoreService(168): Chore ScheduledChore name=36236b0d7605,33883,1733966101479-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T01:15:03,468 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-12T01:15:03,472 INFO [RS:0;36236b0d7605:38295 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-12T01:15:03,474 INFO [RS:1;36236b0d7605:44743 {}] hbase.ChoreService(168): Chore ScheduledChore name=36236b0d7605,44743,1733966101408-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,477 INFO [RS:0;36236b0d7605:38295 {}] hbase.ChoreService(168): Chore ScheduledChore name=36236b0d7605,38295,1733966101221-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,477 INFO [RS:1;36236b0d7605:44743 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,478 INFO [RS:0;36236b0d7605:38295 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,478 INFO [RS:0;36236b0d7605:38295 {}] regionserver.Replication(171): 36236b0d7605,38295,1733966101221 started 2024-12-12T01:15:03,483 INFO [RS:1;36236b0d7605:44743 {}] regionserver.Replication(171): 36236b0d7605,44743,1733966101408 started 2024-12-12T01:15:03,496 INFO [RS:2;36236b0d7605:33883 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-12T01:15:03,496 INFO [RS:2;36236b0d7605:33883 {}] hbase.ChoreService(168): Chore ScheduledChore name=36236b0d7605,33883,1733966101479-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,497 INFO [RS:2;36236b0d7605:33883 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,497 INFO [RS:2;36236b0d7605:33883 {}] regionserver.Replication(171): 36236b0d7605,33883,1733966101479 started 2024-12-12T01:15:03,532 INFO [RS:0;36236b0d7605:38295 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
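Most of the "Chore ScheduledChore name=... is enabled" lines correspond to background tasks registered with each server's ChoreService. A hedged sketch of that mechanism with a custom chore; the name and one-second period are illustrative, and ScheduledChore is an internal HBase class, so this only shows the shape of what the log is reporting:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreExample {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Runs chore() once per period, the way MemstoreFlusherChore or CompactionChecker do above.
    ScheduledChore heartbeat = new ScheduledChore("demo-heartbeat", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(heartbeat);
  }
}
```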
2024-12-12T01:15:03,532 INFO [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(1482): Serving as 36236b0d7605,38295,1733966101221, RpcServer on 36236b0d7605/172.17.0.2:38295, sessionid=0x10083caab1a0001 2024-12-12T01:15:03,533 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T01:15:03,534 DEBUG [RS:0;36236b0d7605:38295 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 36236b0d7605,38295,1733966101221 2024-12-12T01:15:03,534 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '36236b0d7605,38295,1733966101221' 2024-12-12T01:15:03,534 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T01:15:03,540 INFO [RS:1;36236b0d7605:44743 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,541 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(1482): Serving as 36236b0d7605,44743,1733966101408, RpcServer on 36236b0d7605/172.17.0.2:44743, sessionid=0x10083caab1a0002 2024-12-12T01:15:03,542 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T01:15:03,542 DEBUG [RS:1;36236b0d7605:44743 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 36236b0d7605,44743,1733966101408 2024-12-12T01:15:03,543 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '36236b0d7605,44743,1733966101408' 2024-12-12T01:15:03,546 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T01:15:03,548 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T01:15:03,549 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T01:15:03,549 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T01:15:03,550 DEBUG [RS:0;36236b0d7605:38295 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 36236b0d7605,38295,1733966101221 2024-12-12T01:15:03,550 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '36236b0d7605,38295,1733966101221' 2024-12-12T01:15:03,550 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T01:15:03,551 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T01:15:03,556 DEBUG [RS:0;36236b0d7605:38295 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T01:15:03,556 INFO [RS:0;36236b0d7605:38295 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T01:15:03,556 INFO [RS:0;36236b0d7605:38295 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
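Each RegionServer has now started its ZooKeeper procedure members for flush-table-proc and online-snapshot, the coordination used when a client requests a table-wide flush or snapshot. A hedged client-side sketch of issuing those requests; the "demo" table and snapshot name are illustrative, and exactly which procedure path serves them can vary by version:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("demo");
      admin.flush(table);                  // the kind of request the flush-table-proc members handle
      admin.snapshot("demo-snap", table);  // the kind of request the online-snapshot members handle
    }
  }
}
```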
2024-12-12T01:15:03,560 INFO [RS:2;36236b0d7605:33883 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:03,560 INFO [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(1482): Serving as 36236b0d7605,33883,1733966101479, RpcServer on 36236b0d7605/172.17.0.2:33883, sessionid=0x10083caab1a0003 2024-12-12T01:15:03,560 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T01:15:03,560 DEBUG [RS:2;36236b0d7605:33883 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 36236b0d7605,33883,1733966101479 2024-12-12T01:15:03,560 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '36236b0d7605,33883,1733966101479' 2024-12-12T01:15:03,560 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T01:15:03,556 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T01:15:03,561 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T01:15:03,562 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T01:15:03,562 DEBUG [RS:1;36236b0d7605:44743 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 36236b0d7605,44743,1733966101408 2024-12-12T01:15:03,562 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '36236b0d7605,44743,1733966101408' 2024-12-12T01:15:03,562 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T01:15:03,562 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T01:15:03,563 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T01:15:03,563 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T01:15:03,563 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T01:15:03,563 DEBUG [RS:2;36236b0d7605:33883 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 36236b0d7605,33883,1733966101479 2024-12-12T01:15:03,563 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '36236b0d7605,33883,1733966101479' 2024-12-12T01:15:03,563 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T01:15:03,564 DEBUG [RS:1;36236b0d7605:44743 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T01:15:03,564 INFO [RS:1;36236b0d7605:44743 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T01:15:03,564 INFO [RS:1;36236b0d7605:44743 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-12T01:15:03,565 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T01:15:03,566 DEBUG [RS:2;36236b0d7605:33883 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T01:15:03,566 INFO [RS:2;36236b0d7605:33883 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T01:15:03,566 INFO [RS:2;36236b0d7605:33883 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-12T01:15:03,676 INFO [RS:2;36236b0d7605:33883 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T01:15:03,676 INFO [RS:0;36236b0d7605:38295 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T01:15:03,677 INFO [RS:1;36236b0d7605:44743 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T01:15:03,684 INFO [RS:1;36236b0d7605:44743 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=36236b0d7605%2C44743%2C1733966101408, suffix=, logDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,44743,1733966101408, archiveDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/oldWALs, maxLogs=32 2024-12-12T01:15:03,685 INFO [RS:0;36236b0d7605:38295 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=36236b0d7605%2C38295%2C1733966101221, suffix=, logDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,38295,1733966101221, archiveDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/oldWALs, maxLogs=32 2024-12-12T01:15:03,689 INFO [RS:2;36236b0d7605:33883 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=36236b0d7605%2C33883%2C1733966101479, suffix=, logDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,33883,1733966101479, archiveDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/oldWALs, maxLogs=32 2024-12-12T01:15:03,714 DEBUG [RS:1;36236b0d7605:44743 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,44743,1733966101408/36236b0d7605%2C44743%2C1733966101408.1733966103688, exclude list is [], retry=0 2024-12-12T01:15:03,716 DEBUG [RS:0;36236b0d7605:38295 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,38295,1733966101221/36236b0d7605%2C38295%2C1733966101221.1733966103689, exclude list is [], retry=0 2024-12-12T01:15:03,717 DEBUG [RS:2;36236b0d7605:33883 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,33883,1733966101479/36236b0d7605%2C33883%2C1733966101479.1733966103691, exclude list is [], retry=0 2024-12-12T01:15:03,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:42945,DS-23c3192d-5425-4bff-b10a-e8d775ff9537,DISK] 2024-12-12T01:15:03,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34547,DS-ded111ef-8a50-4258-b66c-6d781d5302f4,DISK] 2024-12-12T01:15:03,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39595,DS-31c8f016-3bfa-40b7-bd61-caab7f8132d0,DISK] 2024-12-12T01:15:03,721 WARN [IPC Server handler 3 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-12T01:15:03,722 WARN [IPC Server handler 3 on default port 33353 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-12T01:15:03,722 WARN [IPC Server handler 3 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-12T01:15:03,723 WARN [IPC Server handler 2 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-12T01:15:03,724 WARN [IPC Server handler 2 on default port 33353 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-12T01:15:03,724 WARN [IPC Server handler 2 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], 
replicationFallbacks=[ARCHIVE]} 2024-12-12T01:15:03,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42945,DS-23c3192d-5425-4bff-b10a-e8d775ff9537,DISK] 2024-12-12T01:15:03,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34547,DS-ded111ef-8a50-4258-b66c-6d781d5302f4,DISK] 2024-12-12T01:15:03,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34547,DS-ded111ef-8a50-4258-b66c-6d781d5302f4,DISK] 2024-12-12T01:15:03,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42945,DS-23c3192d-5425-4bff-b10a-e8d775ff9537,DISK] 2024-12-12T01:15:03,794 INFO [RS:1;36236b0d7605:44743 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,44743,1733966101408/36236b0d7605%2C44743%2C1733966101408.1733966103688 2024-12-12T01:15:03,809 DEBUG [RS:1;36236b0d7605:44743 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33949:33949),(127.0.0.1/127.0.0.1:34145:34145),(127.0.0.1/127.0.0.1:42871:42871)] 2024-12-12T01:15:03,833 INFO [RS:0;36236b0d7605:38295 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,38295,1733966101221/36236b0d7605%2C38295%2C1733966101221.1733966103689 2024-12-12T01:15:03,834 INFO [RS:2;36236b0d7605:33883 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,33883,1733966101479/36236b0d7605%2C33883%2C1733966101479.1733966103691 2024-12-12T01:15:03,836 DEBUG [RS:0;36236b0d7605:38295 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34145:34145),(127.0.0.1/127.0.0.1:42871:42871)] 2024-12-12T01:15:03,838 DEBUG [RS:2;36236b0d7605:33883 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34145:34145),(127.0.0.1/127.0.0.1:42871:42871)] 2024-12-12T01:15:03,893 DEBUG [36236b0d7605:36083 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-12T01:15:03,905 DEBUG [36236b0d7605:36083 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:15:03,916 DEBUG [36236b0d7605:36083 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:15:03,916 DEBUG [36236b0d7605:36083 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:15:03,916 DEBUG [36236b0d7605:36083 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:15:03,916 DEBUG [36236b0d7605:36083 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:15:03,916 DEBUG [36236b0d7605:36083 {}] balancer.BalancerClusterState(310): server 1 is on host 0 
2024-12-12T01:15:03,916 DEBUG [36236b0d7605:36083 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:15:03,917 INFO [36236b0d7605:36083 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:15:03,917 INFO [36236b0d7605:36083 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:15:03,917 INFO [36236b0d7605:36083 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:15:03,917 DEBUG [36236b0d7605:36083 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:15:03,927 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:15:03,939 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 36236b0d7605,44743,1733966101408, state=OPENING 2024-12-12T01:15:03,946 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-12T01:15:03,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:03,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:03,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:03,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:03,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T01:15:03,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T01:15:03,955 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T01:15:03,956 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T01:15:03,958 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-12T01:15:03,961 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:15:04,148 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T01:15:04,152 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:40121, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T01:15:04,172 INFO [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-12T01:15:04,172 INFO [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T01:15:04,173 INFO [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-12T01:15:04,177 INFO [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=36236b0d7605%2C44743%2C1733966101408.meta, suffix=.meta, logDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,44743,1733966101408, archiveDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/oldWALs, maxLogs=32 2024-12-12T01:15:04,202 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,44743,1733966101408/36236b0d7605%2C44743%2C1733966101408.meta.1733966104180.meta, exclude list is [], retry=0 2024-12-12T01:15:04,206 WARN [IPC Server handler 3 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-12T01:15:04,206 WARN [IPC Server handler 3 on default port 33353 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-12T01:15:04,206 WARN [IPC Server handler 3 on default port 33353 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-12T01:15:04,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34547,DS-ded111ef-8a50-4258-b66c-6d781d5302f4,DISK] 2024-12-12T01:15:04,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:42945,DS-23c3192d-5425-4bff-b10a-e8d775ff9537,DISK] 2024-12-12T01:15:04,221 INFO [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/WALs/36236b0d7605,44743,1733966101408/36236b0d7605%2C44743%2C1733966101408.meta.1733966104180.meta 2024-12-12T01:15:04,224 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42871:42871),(127.0.0.1/127.0.0.1:34145:34145)] 2024-12-12T01:15:04,224 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-12T01:15:04,226 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-12T01:15:04,227 INFO [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:15:04,229 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-12T01:15:04,233 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-12T01:15:04,236 INFO [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-12T01:15:04,248 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-12T01:15:04,249 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:04,249 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-12T01:15:04,249 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-12T01:15:04,272 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T01:15:04,275 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T01:15:04,276 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:04,277 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T01:15:04,278 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-12T01:15:04,283 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-12T01:15:04,283 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:04,285 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T01:15:04,285 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T01:15:04,287 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T01:15:04,288 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:04,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T01:15:04,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T01:15:04,296 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T01:15:04,296 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:04,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-12T01:15:04,299 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-12T01:15:04,301 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740 2024-12-12T01:15:04,306 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740 2024-12-12T01:15:04,321 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-12T01:15:04,331 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-12T01:15:04,334 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-12T01:15:04,343 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-12T01:15:04,348 INFO [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71748854, jitterRate=0.06914123892784119}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-12T01:15:04,349 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-12T01:15:04,360 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733966104251Writing region info on filesystem at 1733966104252 (+1 ms)Initializing all the Stores at 1733966104259 (+7 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733966104260 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733966104271 (+11 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966104271Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733966104271Cleaning up temporary data from old regions at 1733966104331 (+60 ms)Running coprocessor post-open hooks at 1733966104349 (+18 ms)Region opened successfully at 1733966104360 (+11 ms) 2024-12-12T01:15:04,378 INFO [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733966104135 2024-12-12T01:15:04,395 DEBUG [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-12T01:15:04,397 INFO [RS_OPEN_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-12T01:15:04,399 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:15:04,402 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 36236b0d7605,44743,1733966101408, state=OPEN 2024-12-12T01:15:04,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T01:15:04,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T01:15:04,405 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T01:15:04,406 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=36236b0d7605,44743,1733966101408 2024-12-12T01:15:04,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T01:15:04,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T01:15:04,407 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T01:15:04,407 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T01:15:04,408 DEBUG [zk-event-processor-pool-0 
{}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T01:15:04,427 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-12T01:15:04,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=36236b0d7605,44743,1733966101408 in 445 msec 2024-12-12T01:15:04,433 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-12T01:15:04,435 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-12T01:15:04,435 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-12T01:15:04,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.2130 sec 2024-12-12T01:15:04,482 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:15:04,484 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:15:04,530 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:04,537 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34855, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:15:04,568 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.6690 sec 2024-12-12T01:15:04,569 INFO [master/36236b0d7605:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733966104568, completionTime=-1 2024-12-12T01:15:04,573 INFO [master/36236b0d7605:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-12T01:15:04,574 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-12T01:15:04,701 INFO [master/36236b0d7605:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-12T01:15:04,702 INFO [master/36236b0d7605:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733966164702 2024-12-12T01:15:04,702 INFO [master/36236b0d7605:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733966224702 2024-12-12T01:15:04,702 INFO [master/36236b0d7605:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 128 msec 2024-12-12T01:15:04,704 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:15:04,721 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=36236b0d7605,36083,1733966100114-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:04,722 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=36236b0d7605,36083,1733966100114-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:04,722 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=36236b0d7605,36083,1733966100114-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:04,725 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-36236b0d7605:36083, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:04,726 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:04,729 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:04,744 DEBUG [master/36236b0d7605:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-12T01:15:04,781 INFO [master/36236b0d7605:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.176sec 2024-12-12T01:15:04,794 INFO [master/36236b0d7605:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-12T01:15:04,795 INFO [master/36236b0d7605:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-12T01:15:04,797 INFO [master/36236b0d7605:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-12T01:15:04,798 INFO [master/36236b0d7605:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-12T01:15:04,798 INFO [master/36236b0d7605:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-12T01:15:04,799 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=36236b0d7605,36083,1733966100114-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T01:15:04,800 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=36236b0d7605,36083,1733966100114-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-12T01:15:04,839 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-12T01:15:04,839 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 36236b0d7605,36083,1733966100114 2024-12-12T01:15:04,843 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2badef03 2024-12-12T01:15:04,846 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T01:15:04,848 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48865, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T01:15:04,858 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T01:15:04,865 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@434a5c35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:04,872 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-12T01:15:04,872 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-12T01:15:04,877 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:15:04,881 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:15:04,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-12T01:15:04,887 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:15:04,894 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:04,895 INFO 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-12T01:15:04,898 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:15:04,899 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:15:04,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T01:15:04,911 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:15:04,911 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:15:04,911 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ff5fd07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:04,911 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:15:04,915 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:15:04,945 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:04,950 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53638, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:15:04,955 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23f92ad4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:04,955 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:15:04,968 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:15:04,969 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:05,005 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48946, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:15:05,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=36236b0d7605,36083,1733966100114 2024-12-12T01:15:05,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 
2024-12-12T01:15:05,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/test.cache.data in system properties and HBase conf 2024-12-12T01:15:05,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.tmp.dir in system properties and HBase conf 2024-12-12T01:15:05,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir in system properties and HBase conf 2024-12-12T01:15:05,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-12T01:15:05,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-12T01:15:05,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-12T01:15:05,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-12T01:15:05,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-12T01:15:05,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-12T01:15:05,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T01:15:05,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-12T01:15:05,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-12T01:15:05,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-12T01:15:05,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T01:15:05,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-12T01:15:05,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/nfs.dump.dir in system properties and HBase conf 2024-12-12T01:15:05,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/java.io.tmpdir in system properties and HBase conf 2024-12-12T01:15:05,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-12T01:15:05,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-12T01:15:05,013 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-12T01:15:05,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T01:15:05,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741837_1013 (size=349) 2024-12-12T01:15:05,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741837_1013 (size=349) 2024-12-12T01:15:05,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741837_1013 (size=349) 2024-12-12T01:15:05,085 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] 
regionserver.HRegion(7572): creating {ENCODED => a76413eb65b7bad8091ed755bedda4fd, NAME => 'hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:15:05,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741838_1014 (size=36) 2024-12-12T01:15:05,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741838_1014 (size=36) 2024-12-12T01:15:05,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741838_1014 (size=36) 2024-12-12T01:15:05,125 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:05,125 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing a76413eb65b7bad8091ed755bedda4fd, disabling compactions & flushes 2024-12-12T01:15:05,125 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 2024-12-12T01:15:05,126 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 2024-12-12T01:15:05,126 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. after waiting 0 ms 2024-12-12T01:15:05,126 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 2024-12-12T01:15:05,126 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 
2024-12-12T01:15:05,126 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for a76413eb65b7bad8091ed755bedda4fd: Waiting for close lock at 1733966105125Disabling compacts and flushes for region at 1733966105125Disabling writes for close at 1733966105126 (+1 ms)Writing region close event to WAL at 1733966105126Closed at 1733966105126 2024-12-12T01:15:05,130 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:15:05,139 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733966105131"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966105131"}]},"ts":"1733966105131"} 2024-12-12T01:15:05,148 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-12T01:15:05,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741839_1015 (size=592039) 2024-12-12T01:15:05,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741839_1015 (size=592039) 2024-12-12T01:15:05,151 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:15:05,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741839_1015 (size=592039) 2024-12-12T01:15:05,156 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966105151"}]},"ts":"1733966105151"} 2024-12-12T01:15:05,172 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-12T01:15:05,176 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:15:05,179 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:15:05,179 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:15:05,179 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:15:05,179 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:15:05,180 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:15:05,180 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:15:05,180 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:15:05,180 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:15:05,180 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:15:05,180 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:15:05,182 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=a76413eb65b7bad8091ed755bedda4fd, ASSIGN}] 2024-12-12T01:15:05,186 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=a76413eb65b7bad8091ed755bedda4fd, ASSIGN 2024-12-12T01:15:05,188 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=a76413eb65b7bad8091ed755bedda4fd, ASSIGN; state=OFFLINE, location=36236b0d7605,44743,1733966101408; forceNewPlan=false, retain=false 2024-12-12T01:15:05,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741840_1016 (size=1663647) 2024-12-12T01:15:05,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741840_1016 (size=1663647) 2024-12-12T01:15:05,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741840_1016 (size=1663647) 2024-12-12T01:15:05,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T01:15:05,342 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-12T01:15:05,344 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a76413eb65b7bad8091ed755bedda4fd, regionState=OPENING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:15:05,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=a76413eb65b7bad8091ed755bedda4fd, ASSIGN because future has completed 2024-12-12T01:15:05,357 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a76413eb65b7bad8091ed755bedda4fd, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:15:05,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T01:15:05,784 INFO [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 2024-12-12T01:15:05,785 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a76413eb65b7bad8091ed755bedda4fd, NAME => 'hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd.', STARTKEY => '', ENDKEY => ''} 2024-12-12T01:15:05,785 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 
service=AccessControlService 2024-12-12T01:15:05,786 INFO [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:15:05,786 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:15:05,786 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:05,786 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:15:05,786 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:15:05,803 INFO [StoreOpener-a76413eb65b7bad8091ed755bedda4fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:15:05,808 INFO [StoreOpener-a76413eb65b7bad8091ed755bedda4fd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a76413eb65b7bad8091ed755bedda4fd columnFamilyName l 2024-12-12T01:15:05,808 DEBUG [StoreOpener-a76413eb65b7bad8091ed755bedda4fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:05,810 INFO [StoreOpener-a76413eb65b7bad8091ed755bedda4fd-1 {}] regionserver.HStore(327): Store=a76413eb65b7bad8091ed755bedda4fd/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:15:05,810 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:15:05,812 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/acl/a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:15:05,813 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/acl/a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:15:05,814 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:15:05,814 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:15:05,818 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:15:05,853 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/acl/a76413eb65b7bad8091ed755bedda4fd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:15:05,856 INFO [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened a76413eb65b7bad8091ed755bedda4fd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61086411, jitterRate=-0.0897415429353714}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:15:05,856 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:15:05,860 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a76413eb65b7bad8091ed755bedda4fd: Running coprocessor pre-open hook at 1733966105786Writing region info on filesystem at 1733966105787 (+1 ms)Initializing all the Stores at 1733966105797 (+10 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733966105797Cleaning up temporary data from old regions at 1733966105814 (+17 ms)Running coprocessor post-open hooks at 1733966105856 (+42 ms)Region opened successfully at 1733966105860 (+4 ms) 2024-12-12T01:15:05,864 INFO [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., pid=6, masterSystemTime=1733966105721 2024-12-12T01:15:05,871 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 2024-12-12T01:15:05,871 INFO [RS_OPEN_PRIORITY_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 2024-12-12T01:15:05,872 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a76413eb65b7bad8091ed755bedda4fd, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:15:05,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a76413eb65b7bad8091ed755bedda4fd, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:15:05,905 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=36236b0d7605,44743,1733966101408, table=hbase:acl, region=a76413eb65b7bad8091ed755bedda4fd. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-12T01:15:05,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-12T01:15:05,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a76413eb65b7bad8091ed755bedda4fd, server=36236b0d7605,44743,1733966101408 in 549 msec 2024-12-12T01:15:05,922 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-12T01:15:05,922 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=a76413eb65b7bad8091ed755bedda4fd, ASSIGN in 731 msec 2024-12-12T01:15:05,925 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:15:05,925 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966105925"}]},"ts":"1733966105925"} 2024-12-12T01:15:05,933 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-12T01:15:05,936 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:15:05,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 1.0710 sec 2024-12-12T01:15:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-12T01:15:06,071 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-12-12T01:15:06,114 DEBUG [master/36236b0d7605:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-12T01:15:06,115 INFO 
[master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-12T01:15:06,116 INFO [master/36236b0d7605:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=36236b0d7605,36083,1733966100114-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T01:15:07,195 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T01:15:07,344 WARN [Thread-371 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T01:15:07,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741829_1005 (size=34) 2024-12-12T01:15:07,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741831_1007 (size=1321) 2024-12-12T01:15:07,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741832_1008 (size=32) 2024-12-12T01:15:07,718 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T01:15:07,719 WARN [Thread-371 {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-12T01:15:07,720 INFO [Thread-371 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T01:15:07,784 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T01:15:07,784 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T01:15:07,784 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T01:15:07,788 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T01:15:07,789 INFO [Thread-371 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T01:15:07,789 INFO [Thread-371 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T01:15:07,789 INFO [Thread-371 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-12T01:15:07,790 INFO [Thread-371 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7efa4ca8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,AVAILABLE} 2024-12-12T01:15:07,790 INFO [Thread-371 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@796e2812{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-12T01:15:07,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d1d5bb9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,AVAILABLE} 2024-12-12T01:15:07,798 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5336f6f1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-12T01:15:08,016 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices as a root resource class 2024-12-12T01:15:08,016 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver as a provider class 2024-12-12T01:15:08,016 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-12T01:15:08,025 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-12T01:15:08,117 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T01:15:08,372 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T01:15:08,938 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices to GuiceManagedComponentProvider with the scope "PerRequest" 2024-12-12T01:15:08,961 INFO [Thread-371 {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@6b16933f{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/java.io.tmpdir/jetty-localhost-42311-hadoop-yarn-common-3_4_1_jar-_-any-13731917498661367810/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-12T01:15:08,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@537e6d2d{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/java.io.tmpdir/jetty-localhost-40957-hadoop-yarn-common-3_4_1_jar-_-any-17149652202428256534/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-12T01:15:08,962 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@456f2b74{HTTP/1.1, (http/1.1)}{localhost:40957} 2024-12-12T01:15:08,962 INFO [Time-limited test {}] server.Server(415): Started @17511ms 2024-12-12T01:15:08,968 INFO [Thread-371 {}] server.AbstractConnector(333): Started ServerConnector@3686a4b4{HTTP/1.1, (http/1.1)}{localhost:42311} 2024-12-12T01:15:08,968 INFO [Thread-371 {}] server.Server(415): Started @17517ms 2024-12-12T01:15:09,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741841_1017 (size=5) 2024-12-12T01:15:09,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741841_1017 (size=5) 2024-12-12T01:15:09,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741841_1017 (size=5) 2024-12-12T01:15:09,770 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:15:09,921 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-12T01:15:09,923 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-12T01:15:10,389 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-12T01:15:10,395 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T01:15:10,435 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 
2024-12-12T01:15:10,436 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T01:15:10,444 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T01:15:10,444 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T01:15:10,444 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-12T01:15:10,445 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T01:15:10,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a3d235c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,AVAILABLE} 2024-12-12T01:15:10,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35f6d89f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-12T01:15:10,507 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-12T01:15:10,507 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-12T01:15:10,508 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-12T01:15:10,508 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-12T01:15:10,523 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T01:15:10,553 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T01:15:10,676 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T01:15:10,689 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@440030ba{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/java.io.tmpdir/jetty-localhost-39117-hadoop-yarn-common-3_4_1_jar-_-any-13312357084681607607/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-12T01:15:10,689 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@35130132{HTTP/1.1, (http/1.1)}{localhost:39117} 2024-12-12T01:15:10,690 INFO [Time-limited test {}] server.Server(415): Started @19239ms 2024-12-12T01:15:10,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:15:10,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-12T01:15:10,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-12T01:15:10,714 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-12T01:15:10,716 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-12T01:15:10,716 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-12T01:15:10,717 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:15:10,717 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-12T01:15:10,718 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-12T01:15:10,718 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-12T01:15:10,719 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:15:10,719 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-12T01:15:10,719 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T01:15:10,719 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-12T01:15:10,720 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 
2024-12-12T01:15:10,720 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-12T01:15:10,945 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-12T01:15:10,949 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T01:15:10,989 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-12T01:15:10,990 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-12T01:15:11,017 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-12T01:15:11,017 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-12T01:15:11,017 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-12T01:15:11,024 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-12T01:15:11,025 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@658498f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,AVAILABLE} 2024-12-12T01:15:11,026 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@207b4e3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-12T01:15:11,101 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-12T01:15:11,102 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-12T01:15:11,102 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-12T01:15:11,102 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-12T01:15:11,145 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T01:15:11,155 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding 
org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T01:15:11,302 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-12T01:15:11,308 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3837ae7d{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/java.io.tmpdir/jetty-localhost-43395-hadoop-yarn-common-3_4_1_jar-_-any-8399943453969140968/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-12T01:15:11,309 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@760d8954{HTTP/1.1, (http/1.1)}{localhost:43395} 2024-12-12T01:15:11,310 INFO [Time-limited test {}] server.Server(415): Started @19858ms 2024-12-12T01:15:11,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-12T01:15:11,350 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:15:11,392 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=715, OpenFileDescriptor=757, MaxFileDescriptor=1048576, SystemLoadAverage=601, ProcessCount=11, AvailableMemoryMB=10038 2024-12-12T01:15:11,394 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=715 is superior to 500 2024-12-12T01:15:11,399 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-12T01:15:11,405 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 36236b0d7605,36083,1733966100114 2024-12-12T01:15:11,405 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3319ca7a 2024-12-12T01:15:11,406 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T01:15:11,410 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53652, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T01:15:11,412 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:15:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:11,425 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:15:11,427 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-12T01:15:11,427 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:11,430 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:15:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-12T01:15:11,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741842_1018 (size=422) 2024-12-12T01:15:11,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741842_1018 (size=422) 2024-12-12T01:15:11,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741842_1018 (size=422) 2024-12-12T01:15:11,482 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 83614ff2025743f7080c6ab5c0ee90ed, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:15:11,482 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1e4a47b0b955b8e01f81d0da6b552a65, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:15:11,517 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741843_1019 (size=83) 2024-12-12T01:15:11,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741843_1019 (size=83) 2024-12-12T01:15:11,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741843_1019 (size=83) 2024-12-12T01:15:11,522 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:11,522 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 83614ff2025743f7080c6ab5c0ee90ed, disabling compactions & flushes 2024-12-12T01:15:11,523 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 2024-12-12T01:15:11,523 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 2024-12-12T01:15:11,523 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. after waiting 0 ms 2024-12-12T01:15:11,523 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 2024-12-12T01:15:11,523 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 
2024-12-12T01:15:11,523 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 83614ff2025743f7080c6ab5c0ee90ed: Waiting for close lock at 1733966111522Disabling compacts and flushes for region at 1733966111522Disabling writes for close at 1733966111523 (+1 ms)Writing region close event to WAL at 1733966111523Closed at 1733966111523 2024-12-12T01:15:11,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741844_1020 (size=83) 2024-12-12T01:15:11,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-12T01:15:11,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741844_1020 (size=83) 2024-12-12T01:15:11,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741844_1020 (size=83) 2024-12-12T01:15:11,540 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:11,540 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 1e4a47b0b955b8e01f81d0da6b552a65, disabling compactions & flushes 2024-12-12T01:15:11,540 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 2024-12-12T01:15:11,540 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 2024-12-12T01:15:11,540 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. after waiting 0 ms 2024-12-12T01:15:11,540 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 2024-12-12T01:15:11,540 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 
2024-12-12T01:15:11,541 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1e4a47b0b955b8e01f81d0da6b552a65: Waiting for close lock at 1733966111540Disabling compacts and flushes for region at 1733966111540Disabling writes for close at 1733966111540Writing region close event to WAL at 1733966111540Closed at 1733966111540 2024-12-12T01:15:11,544 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:15:11,544 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733966111544"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966111544"}]},"ts":"1733966111544"} 2024-12-12T01:15:11,544 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733966111544"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966111544"}]},"ts":"1733966111544"} 2024-12-12T01:15:11,588 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-12T01:15:11,590 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:15:11,591 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966111591"}]},"ts":"1733966111591"} 2024-12-12T01:15:11,595 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-12T01:15:11,595 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:15:11,598 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:15:11,598 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:15:11,598 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:15:11,598 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:15:11,598 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:15:11,598 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:15:11,598 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:15:11,598 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:15:11,598 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:15:11,598 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:15:11,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1e4a47b0b955b8e01f81d0da6b552a65, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=83614ff2025743f7080c6ab5c0ee90ed, ASSIGN}] 2024-12-12T01:15:11,602 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1e4a47b0b955b8e01f81d0da6b552a65, ASSIGN 2024-12-12T01:15:11,602 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=83614ff2025743f7080c6ab5c0ee90ed, ASSIGN 2024-12-12T01:15:11,605 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1e4a47b0b955b8e01f81d0da6b552a65, ASSIGN; state=OFFLINE, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:15:11,605 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=83614ff2025743f7080c6ab5c0ee90ed, ASSIGN; state=OFFLINE, location=36236b0d7605,33883,1733966101479; forceNewPlan=false, retain=false 2024-12-12T01:15:11,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-12T01:15:11,755 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-12T01:15:11,756 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=83614ff2025743f7080c6ab5c0ee90ed, regionState=OPENING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:15:11,756 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=1e4a47b0b955b8e01f81d0da6b552a65, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:15:11,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=83614ff2025743f7080c6ab5c0ee90ed, ASSIGN because future has completed 2024-12-12T01:15:11,763 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:15:11,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1e4a47b0b955b8e01f81d0da6b552a65, ASSIGN because future has completed 2024-12-12T01:15:11,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:15:11,921 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T01:15:11,921 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T01:15:11,940 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57153, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T01:15:11,940 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56297, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T01:15:11,950 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 2024-12-12T01:15:11,951 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 83614ff2025743f7080c6ab5c0ee90ed, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T01:15:11,951 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 
service=AccessControlService 2024-12-12T01:15:11,951 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:15:11,951 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 2024-12-12T01:15:11,952 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:11,952 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 1e4a47b0b955b8e01f81d0da6b552a65, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T01:15:11,952 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:11,952 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:11,952 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. service=AccessControlService 2024-12-12T01:15:11,952 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:11,953 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:15:11,953 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:11,953 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:11,953 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:11,953 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:11,955 INFO [StoreOpener-83614ff2025743f7080c6ab5c0ee90ed-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:11,956 INFO [StoreOpener-1e4a47b0b955b8e01f81d0da6b552a65-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:11,958 INFO [StoreOpener-83614ff2025743f7080c6ab5c0ee90ed-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83614ff2025743f7080c6ab5c0ee90ed columnFamilyName cf 2024-12-12T01:15:11,958 DEBUG [StoreOpener-83614ff2025743f7080c6ab5c0ee90ed-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:11,958 INFO [StoreOpener-1e4a47b0b955b8e01f81d0da6b552a65-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e4a47b0b955b8e01f81d0da6b552a65 columnFamilyName cf 2024-12-12T01:15:11,959 DEBUG [StoreOpener-1e4a47b0b955b8e01f81d0da6b552a65-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:11,959 INFO [StoreOpener-83614ff2025743f7080c6ab5c0ee90ed-1 {}] regionserver.HStore(327): Store=83614ff2025743f7080c6ab5c0ee90ed/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:15:11,959 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:11,959 INFO [StoreOpener-1e4a47b0b955b8e01f81d0da6b552a65-1 {}] regionserver.HStore(327): Store=1e4a47b0b955b8e01f81d0da6b552a65/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:15:11,960 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:11,961 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:11,961 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:11,962 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:11,962 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:11,963 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:11,963 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:11,963 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:11,963 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up 
temporary data for 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:11,966 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:11,966 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:11,970 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:15:11,971 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:15:11,971 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 83614ff2025743f7080c6ab5c0ee90ed; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68147449, jitterRate=0.015476122498512268}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:15:11,971 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:11,971 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 1e4a47b0b955b8e01f81d0da6b552a65; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69706369, jitterRate=0.038705840706825256}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:15:11,972 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:11,973 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 1e4a47b0b955b8e01f81d0da6b552a65: Running coprocessor pre-open hook at 1733966111954Writing region info on filesystem at 1733966111954Initializing all the Stores at 1733966111955 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966111956 (+1 ms)Cleaning up temporary data from old regions at 1733966111963 (+7 ms)Running coprocessor post-open hooks at 1733966111972 (+9 ms)Region opened successfully at 1733966111973 (+1 ms) 2024-12-12T01:15:11,973 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open 
journal for 83614ff2025743f7080c6ab5c0ee90ed: Running coprocessor pre-open hook at 1733966111953Writing region info on filesystem at 1733966111953Initializing all the Stores at 1733966111955 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966111955Cleaning up temporary data from old regions at 1733966111963 (+8 ms)Running coprocessor post-open hooks at 1733966111971 (+8 ms)Region opened successfully at 1733966111973 (+2 ms) 2024-12-12T01:15:11,975 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65., pid=11, masterSystemTime=1733966111920 2024-12-12T01:15:11,976 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed., pid=10, masterSystemTime=1733966111920 2024-12-12T01:15:11,979 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 2024-12-12T01:15:11,979 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 2024-12-12T01:15:11,981 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=1e4a47b0b955b8e01f81d0da6b552a65, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:15:11,986 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:15:11,986 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=36236b0d7605,38295,1733966101221, table=testtb-testExportFileSystemStateWithSplitRegion, region=1e4a47b0b955b8e01f81d0da6b552a65. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-12T01:15:11,986 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 2024-12-12T01:15:11,986 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 
2024-12-12T01:15:11,987 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=83614ff2025743f7080c6ab5c0ee90ed, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:15:11,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:15:11,992 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-12-12T01:15:11,992 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65, server=36236b0d7605,38295,1733966101221 in 223 msec 2024-12-12T01:15:11,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1e4a47b0b955b8e01f81d0da6b552a65, ASSIGN in 393 msec 2024-12-12T01:15:11,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-12T01:15:11,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed, server=36236b0d7605,33883,1733966101479 in 230 msec 2024-12-12T01:15:12,001 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-12T01:15:12,002 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=83614ff2025743f7080c6ab5c0ee90ed, ASSIGN in 398 msec 2024-12-12T01:15:12,003 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:15:12,003 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966112003"}]},"ts":"1733966112003"} 2024-12-12T01:15:12,007 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-12T01:15:12,009 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:15:12,015 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-12T01:15:12,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:15:12,034 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:15:12,034 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:15:12,035 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:12,036 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34229, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-12T01:15:12,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:15:12,042 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-12T01:15:12,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-12T01:15:12,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-12T01:15:12,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-12T01:15:12,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-12T01:15:12,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:12,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-12T01:15:12,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:12,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:15:12,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-12T01:15:12,066 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:15:12,066 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:15:12,067 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:15:12,067 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:15:12,072 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 655 msec 2024-12-12T01:15:12,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-12T01:15:12,567 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-12T01:15:12,567 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSplitRegion get assigned. Timeout = 60000ms 2024-12-12T01:15:12,568 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:15:12,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned to meta. Checking AM states. 2024-12-12T01:15:12,578 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:15:12,579 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned. 2024-12-12T01:15:12,585 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-12T01:15:12,607 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-12T01:15:12,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966112607 (current time:1733966112607). 
2024-12-12T01:15:12,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:15:12,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-12T01:15:12,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:15:12,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f112e14, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:12,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:15:12,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:15:12,612 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:15:12,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:15:12,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:15:12,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78979ef6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:12,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:15:12,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:15:12,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:12,615 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53680, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:15:12,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b690453, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:12,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:15:12,623 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:15:12,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:12,626 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48954, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:15:12,629 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:15:12,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:15:12,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:12,645 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:15:12,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:12,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7d6676, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:12,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:15:12,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:15:12,664 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:15:12,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:15:12,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:15:12,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:12,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24c9e2b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:12,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:15:12,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:15:12,669 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53692, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:15:12,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@790aaf4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:12,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:15:12,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:15:12,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:12,675 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58886, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:15:12,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:15:12,682 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:15:12,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:15:12,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-12T01:15:12,683 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:15:12,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:12,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-12T01:15:12,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:15:12,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-12T01:15:12,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-12T01:15:12,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-12T01:15:12,714 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:15:12,722 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:15:12,744 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:15:12,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741845_1021 (size=215) 2024-12-12T01:15:12,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741845_1021 (size=215) 2024-12-12T01:15:12,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741845_1021 (size=215) 2024-12-12T01:15:12,781 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion 
table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:15:12,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed}] 2024-12-12T01:15:12,791 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:12,791 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:12,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-12T01:15:12,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-12T01:15:12,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-12T01:15:12,953 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 2024-12-12T01:15:12,953 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 2024-12-12T01:15:12,959 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 1e4a47b0b955b8e01f81d0da6b552a65: 2024-12-12T01:15:12,959 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 83614ff2025743f7080c6ab5c0ee90ed: 2024-12-12T01:15:12,959 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-12T01:15:12,959 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-12T01:15:12,960 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:12,960 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:12,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:15:12,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:15:12,966 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:15:12,966 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:15:12,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741847_1023 (size=86) 2024-12-12T01:15:12,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741847_1023 (size=86) 2024-12-12T01:15:12,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741846_1022 (size=86) 2024-12-12T01:15:12,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741847_1023 (size=86) 2024-12-12T01:15:12,985 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 2024-12-12T01:15:12,987 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 
2024-12-12T01:15:12,987 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-12T01:15:12,987 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-12T01:15:12,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741846_1022 (size=86) 2024-12-12T01:15:12,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741846_1022 (size=86) 2024-12-12T01:15:12,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-12T01:15:12,990 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:12,991 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:12,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-12T01:15:12,991 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:12,991 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:12,997 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65 in 209 msec 2024-12-12T01:15:13,000 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-12-12T01:15:13,000 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed in 209 msec 2024-12-12T01:15:13,000 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:15:13,003 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:15:13,008 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:15:13,008 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:13,012 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:13,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-12T01:15:13,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741848_1024 (size=597) 2024-12-12T01:15:13,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741848_1024 (size=597) 2024-12-12T01:15:13,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741848_1024 (size=597) 2024-12-12T01:15:13,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-12T01:15:13,459 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:15:13,479 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:15:13,481 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:13,485 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:15:13,486 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-12T01:15:13,489 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, 
snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 781 msec 2024-12-12T01:15:13,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-12T01:15:13,856 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-12T01:15:13,872 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='0a39c7f5ceedc2c342f947dba344bf0d8', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:15:13,875 DEBUG [Time-limited test {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='1211eba20300fa98c0bac7137c8537e2b', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:15:13,911 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:13,912 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:13,918 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34598, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:15:13,919 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60098, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:15:13,920 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:15:13,932 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33883 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:15:13,939 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-12T01:15:13,947 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:13,948 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 
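The two "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are emitted when a client loads rows with write-ahead logging switched off for the mutation. A minimal client-side sketch of such a write, assuming a standard HBase Java client with default connection configuration; the row key, qualifier and value are placeholders, not the rows the test actually loaded:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))) {
      Put put = new Put(Bytes.toBytes("row-0"));               // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),   // family/qualifier seen in the log
          Bytes.toBytes("value"));                             // placeholder value
      put.setDurability(Durability.SKIP_WAL);                  // skips the WAL, producing the warning above
      table.put(put);
    }
  }
}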
2024-12-12T01:15:13,949 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:15:13,952 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-12T01:15:13,968 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-12T01:15:13,978 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-12T01:15:13,982 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-12T01:15:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966113982 (current time:1733966113982). 2024-12-12T01:15:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:15:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-12T01:15:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:15:13,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cb871a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:13,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:15:13,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:15:13,984 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:15:13,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:15:13,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:15:13,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@560f8da9, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:13,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:15:13,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:15:13,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:13,986 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57550, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:15:13,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11de1528, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:13,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:15:13,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:15:13,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:13,991 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58900, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:15:13,992 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 
2024-12-12T01:15:13,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:15:13,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:13,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:13,993 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:15:13,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74d34d48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:13,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:15:13,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:15:13,995 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:15:13,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:15:13,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:15:13,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e1f9496, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:13,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:15:13,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:15:13,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:13,998 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57564, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:15:13,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26d67f76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:14,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:15:14,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:15:14,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:14,005 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58912, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:15:14,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:15:14,012 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 
2024-12-12T01:15:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:15:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-12T01:15:14,012 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:15:14,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
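The snapshot request logged by master.MasterRpcServices(1763) above is the server-side half of an Admin snapshot call. A hedged sketch of what the requesting client roughly looks like, assuming the HBase Java Admin API; the snapshot and table names are taken from the log, while the connection setup and error handling are illustrative only:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Names taken from the log; FLUSH-type snapshots flush each online region
      // before referencing its hfiles in the snapshot manifest.
      admin.snapshot("snaptb0-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
          SnapshotType.FLUSH);
    }
  }
}

The SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, HRegion flush, and SnapshotManifest lines that follow are the master and region servers executing that request as procedure pid=15.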
2024-12-12T01:15:14,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-12T01:15:14,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-12T01:15:14,018 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:15:14,024 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:15:14,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-12T01:15:14,033 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:15:14,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741849_1025 (size=210) 2024-12-12T01:15:14,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741849_1025 (size=210) 2024-12-12T01:15:14,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741849_1025 (size=210) 2024-12-12T01:15:14,116 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:15:14,117 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed}] 2024-12-12T01:15:14,119 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:14,119 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:14,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-12T01:15:14,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-12T01:15:14,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-12T01:15:14,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 2024-12-12T01:15:14,292 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 83614ff2025743f7080c6ab5c0ee90ed 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-12T01:15:14,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 2024-12-12T01:15:14,301 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 1e4a47b0b955b8e01f81d0da6b552a65 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-12T01:15:14,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-12T01:15:14,462 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/.tmp/cf/23c746c74b7740b693d3925258dcffad is 71, key is 1d7ef530e0bb9b46aa7bc24331d80018/cf:q/1733966113931/Put/seqid=0 2024-12-12T01:15:14,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/.tmp/cf/061716702899460e97581ebc7f065e27 is 71, key is 05a9f572d236d5869df722a171793d6e/cf:q/1733966113919/Put/seqid=0 2024-12-12T01:15:14,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741851_1027 (size=8190) 2024-12-12T01:15:14,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741851_1027 (size=8190) 2024-12-12T01:15:14,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741851_1027 (size=8190) 2024-12-12T01:15:14,577 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/.tmp/cf/23c746c74b7740b693d3925258dcffad 2024-12-12T01:15:14,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741850_1026 (size=5422) 2024-12-12T01:15:14,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741850_1026 (size=5422) 2024-12-12T01:15:14,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741850_1026 (size=5422) 2024-12-12T01:15:14,635 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/.tmp/cf/061716702899460e97581ebc7f065e27 2024-12-12T01:15:14,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-12T01:15:14,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/.tmp/cf/061716702899460e97581ebc7f065e27 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/cf/061716702899460e97581ebc7f065e27 2024-12-12T01:15:14,789 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/.tmp/cf/23c746c74b7740b693d3925258dcffad as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/cf/23c746c74b7740b693d3925258dcffad 2024-12-12T01:15:14,800 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/cf/061716702899460e97581ebc7f065e27, entries=5, sequenceid=6, filesize=5.3 K 2024-12-12T01:15:14,811 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 1e4a47b0b955b8e01f81d0da6b552a65 in 504ms, sequenceid=6, compaction requested=false 
2024-12-12T01:15:14,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-12T01:15:14,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 1e4a47b0b955b8e01f81d0da6b552a65: 2024-12-12T01:15:14,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-12T01:15:14,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:14,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:15:14,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/cf/061716702899460e97581ebc7f065e27] hfiles 2024-12-12T01:15:14,818 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/cf/061716702899460e97581ebc7f065e27 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:14,829 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/cf/23c746c74b7740b693d3925258dcffad, entries=45, sequenceid=6, filesize=8.0 K 2024-12-12T01:15:14,831 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 83614ff2025743f7080c6ab5c0ee90ed in 545ms, sequenceid=6, compaction requested=false 2024-12-12T01:15:14,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 83614ff2025743f7080c6ab5c0ee90ed: 2024-12-12T01:15:14,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 
2024-12-12T01:15:14,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:14,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:15:14,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/cf/23c746c74b7740b693d3925258dcffad] hfiles 2024-12-12T01:15:14,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/cf/23c746c74b7740b693d3925258dcffad for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:15,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741853_1029 (size=125) 2024-12-12T01:15:15,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741853_1029 (size=125) 2024-12-12T01:15:15,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741853_1029 (size=125) 2024-12-12T01:15:15,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 
2024-12-12T01:15:15,009 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-12T01:15:15,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-12T01:15:15,011 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:15,011 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:15:15,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed in 896 msec 2024-12-12T01:15:15,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741852_1028 (size=125) 2024-12-12T01:15:15,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741852_1028 (size=125) 2024-12-12T01:15:15,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741852_1028 (size=125) 2024-12-12T01:15:15,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 
2024-12-12T01:15:15,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-12T01:15:15,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-12T01:15:15,070 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:15,070 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:15:15,081 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-12-12T01:15:15,081 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65 in 956 msec 2024-12-12T01:15:15,081 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:15:15,083 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:15:15,085 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:15:15,085 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:15,087 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:15,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-12T01:15:15,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741854_1030 (size=675) 2024-12-12T01:15:15,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741854_1030 (size=675) 2024-12-12T01:15:15,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741854_1030 (size=675) 2024-12-12T01:15:15,277 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, 
state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:15:15,350 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:15:15,351 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:15,355 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:15:15,355 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-12T01:15:15,360 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 1.3410 sec 2024-12-12T01:15:16,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-12T01:15:16,176 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-12T01:15:16,331 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T01:15:16,362 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T01:15:16,369 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T01:15:16,370 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60102, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T01:15:16,371 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33883 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-12T01:15:16,377 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34612, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T01:15:16,378 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38295 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-12T01:15:16,380 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58926, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T01:15:16,381 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44743 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-12T01:15:16,385 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:15:16,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:16,389 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:15:16,390 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:16,390 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-12T01:15:16,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-12T01:15:16,393 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:15:16,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741855_1031 (size=390) 2024-12-12T01:15:16,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741855_1031 (size=390) 2024-12-12T01:15:16,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741855_1031 (size=390) 2024-12-12T01:15:16,504 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d479e027beb0f7e53a821180da4a387f, NAME => 'testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA 
=> {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:15:16,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-12T01:15:16,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741856_1032 (size=75) 2024-12-12T01:15:16,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741856_1032 (size=75) 2024-12-12T01:15:16,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741856_1032 (size=75) 2024-12-12T01:15:16,562 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:16,562 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing d479e027beb0f7e53a821180da4a387f, disabling compactions & flushes 2024-12-12T01:15:16,562 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. 2024-12-12T01:15:16,562 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. 2024-12-12T01:15:16,562 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. after waiting 0 ms 2024-12-12T01:15:16,562 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. 2024-12-12T01:15:16,562 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. 
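The HMaster$4(2454) "create 'testExportFileSystemStateWithSplitRegion', ..." line above corresponds to a client-side table creation. A rough equivalent using the HBase Java client, assuming the TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API of the 2.x+ client; only the table name, the 'cf' family, and the VERSIONS/BLOCKSIZE attributes are taken from the logged descriptor, everything else falls back to defaults:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSplitRegionTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)          // VERSIONS => '1' in the logged descriptor
              .setBlocksize(64 * 1024)    // BLOCKSIZE => '65536 B (64KB)'
              .build())
          .build());
    }
  }
}

The TransitRegionStateProcedure/OpenRegionProcedure lines that follow are the assignment and opening of the single region this descriptor produces.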
2024-12-12T01:15:16,563 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for d479e027beb0f7e53a821180da4a387f: Waiting for close lock at 1733966116562Disabling compacts and flushes for region at 1733966116562Disabling writes for close at 1733966116562Writing region close event to WAL at 1733966116562Closed at 1733966116562 2024-12-12T01:15:16,567 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:15:16,568 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733966116567"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966116567"}]},"ts":"1733966116567"} 2024-12-12T01:15:16,573 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-12T01:15:16,575 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:15:16,576 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966116575"}]},"ts":"1733966116575"} 2024-12-12T01:15:16,579 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-12T01:15:16,580 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:15:16,581 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:15:16,581 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:15:16,581 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:15:16,581 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:15:16,581 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:15:16,581 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:15:16,582 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:15:16,582 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:15:16,582 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:15:16,582 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:15:16,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d479e027beb0f7e53a821180da4a387f, ASSIGN}] 2024-12-12T01:15:16,584 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d479e027beb0f7e53a821180da4a387f, ASSIGN 2024-12-12T01:15:16,590 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d479e027beb0f7e53a821180da4a387f, ASSIGN; state=OFFLINE, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:15:16,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-12T01:15:16,741 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-12T01:15:16,742 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=d479e027beb0f7e53a821180da4a387f, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:15:16,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d479e027beb0f7e53a821180da4a387f, ASSIGN because future has completed 2024-12-12T01:15:16,761 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure d479e027beb0f7e53a821180da4a387f, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:15:16,937 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. 2024-12-12T01:15:16,938 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => d479e027beb0f7e53a821180da4a387f, NAME => 'testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f.', STARTKEY => '', ENDKEY => ''} 2024-12-12T01:15:16,939 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. service=AccessControlService 2024-12-12T01:15:16,939 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:15:16,939 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:16,940 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:16,940 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:16,940 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:16,943 INFO [StoreOpener-d479e027beb0f7e53a821180da4a387f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:16,950 INFO [StoreOpener-d479e027beb0f7e53a821180da4a387f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d479e027beb0f7e53a821180da4a387f columnFamilyName cf 2024-12-12T01:15:16,951 DEBUG [StoreOpener-d479e027beb0f7e53a821180da4a387f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:16,956 INFO [StoreOpener-d479e027beb0f7e53a821180da4a387f-1 {}] regionserver.HStore(327): Store=d479e027beb0f7e53a821180da4a387f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:15:16,957 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:16,959 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:16,960 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:16,961 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:16,961 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:16,964 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:16,975 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:15:16,976 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened d479e027beb0f7e53a821180da4a387f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61502346, jitterRate=-0.08354362845420837}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:15:16,976 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:16,977 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for d479e027beb0f7e53a821180da4a387f: Running coprocessor pre-open hook at 1733966116940Writing region info on filesystem at 1733966116940Initializing all the Stores at 1733966116943 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966116943Cleaning up temporary data from old regions at 1733966116961 (+18 ms)Running coprocessor post-open hooks at 1733966116976 (+15 ms)Region opened successfully at 1733966116977 (+1 ms) 2024-12-12T01:15:16,980 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f., pid=20, masterSystemTime=1733966116922 2024-12-12T01:15:16,985 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. 2024-12-12T01:15:16,985 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. 
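
Note: once the region reports OPEN and the post-open deploy task finishes, the assignment is visible to clients through the region locator. A hypothetical verification snippet (table name from the log, everything else assumed):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowRegionLocation {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("testExportFileSystemStateWithSplitRegion"))) {
          // Before the split there is a single region covering ('', ''), as in the entries above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
          }
        }
      }
    }
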
2024-12-12T01:15:16,986 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=d479e027beb0f7e53a821180da4a387f, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:15:16,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure d479e027beb0f7e53a821180da4a387f, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:15:16,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-12T01:15:16,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure d479e027beb0f7e53a821180da4a387f, server=36236b0d7605,38295,1733966101221 in 233 msec 2024-12-12T01:15:17,004 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-12T01:15:17,004 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d479e027beb0f7e53a821180da4a387f, ASSIGN in 417 msec 2024-12-12T01:15:17,006 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:15:17,007 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966117006"}]},"ts":"1733966117006"} 2024-12-12T01:15:17,011 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-12T01:15:17,013 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:15:17,014 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-12T01:15:17,020 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-12T01:15:17,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:15:17,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:15:17,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:15:17,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, 
quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:15:17,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-12T01:15:17,027 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:15:17,027 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:15:17,028 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:15:17,028 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:15:17,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:15:17,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:15:17,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:15:17,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:15:17,035 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 643 msec 2024-12-12T01:15:17,456 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:15:17,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-12T01:15:17,536 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: 
default:testExportFileSystemStateWithSplitRegion completed 2024-12-12T01:15:17,537 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T01:15:17,541 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-12T01:15:20,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741857_1033 (size=134217728) 2024-12-12T01:15:20,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741857_1033 (size=134217728) 2024-12-12T01:15:20,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741857_1033 (size=134217728) 2024-12-12T01:15:20,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:20,712 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-12T01:15:20,715 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:20,715 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-12T01:15:21,919 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-12T01:15:22,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741858_1034 (size=134217728) 2024-12-12T01:15:22,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741858_1034 (size=134217728) 2024-12-12T01:15:22,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741858_1034 (size=134217728) 2024-12-12T01:15:22,443 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:15:22,676 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733966117560/Put/seqid=0 2024-12-12T01:15:22,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741859_1035 (size=51979256) 2024-12-12T01:15:22,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39595 is added to blk_1073741859_1035 (size=51979256) 2024-12-12T01:15:22,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741859_1035 (size=51979256) 2024-12-12T01:15:22,691 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15bee7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:22,691 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:15:22,691 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:15:22,696 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:15:22,696 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:15:22,697 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:15:22,697 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1442900c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:22,697 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:15:22,698 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:15:22,717 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:22,719 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56812, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:15:22,721 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@701c7667, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:22,722 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:15:22,725 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:15:22,726 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:22,729 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46852, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:15:22,744 WARN [Time-limited test {}] 
tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:33353/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-12T01:15:22,744 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-12T01:15:22,746 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncConnectionImpl(321): The fetched master address is 36236b0d7605,36083,1733966100114 2024-12-12T01:15:22,746 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@52ad950b 2024-12-12T01:15:22,747 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T01:15:22,749 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56818, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T01:15:22,758 WARN [IPC Server handler 2 on default port 33353 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-12T01:15:22,766 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:15:22,770 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:22,793 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58794, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:15:22,799 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-12T01:15:22,829 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:33353/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-12T01:15:22,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:15:22,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:15:22,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:22,861 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57201, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 
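
Note: the oversplitting WARN and the ClientService connections above belong to the bulk-load step driven by BulkLoadHFilesTool. A rough way to drive the same tool, assuming the usual "<hfile dir> <table name>" argument order and a placeholder staging path (neither is stated by the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
    import org.apache.hadoop.util.ToolRunner;

    public class RunBulkLoad {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Directory layout must be <dir>/<family>/<hfile>, e.g. .../output/cf/test_file as in the log.
        int rc = ToolRunner.run(conf, new BulkLoadHFilesTool(conf), new String[] {
            "hdfs://localhost:33353/path/to/output",        // placeholder staging dir
            "testExportFileSystemStateWithSplitRegion" });  // target table
        System.exit(rc);
      }
    }
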
2024-12-12T01:15:22,862 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44743 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-12T01:15:22,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44743 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:57201 deadline: 1733966182861, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-12T01:15:22,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T01:15:22,872 WARN [IPC Server handler 2 on default port 33353 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-12T01:15:22,900 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:33353/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/output/cf/test_file for inclusion in d479e027beb0f7e53a821180da4a387f/cf 2024-12-12T01:15:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-12T01:15:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-12T01:15:22,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:33353/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-12T01:15:22,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HRegion(2603): Flush status journal for d479e027beb0f7e53a821180da4a387f: 2024-12-12T01:15:22,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:33353/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/output/cf/test_file to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/staging/jenkins__testExportFileSystemStateWithSplitRegion__38ah3l4r8igspqqqsdlt6cdtfq7sl6acl1qmdoesie2vpe6qki83322rkjn1sqbm/cf/test_file 2024-12-12T01:15:22,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/staging/jenkins__testExportFileSystemStateWithSplitRegion__38ah3l4r8igspqqqsdlt6cdtfq7sl6acl1qmdoesie2vpe6qki83322rkjn1sqbm/cf/test_file as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_ 2024-12-12T01:15:22,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/staging/jenkins__testExportFileSystemStateWithSplitRegion__38ah3l4r8igspqqqsdlt6cdtfq7sl6acl1qmdoesie2vpe6qki83322rkjn1sqbm/cf/test_file into d479e027beb0f7e53a821180da4a387f/cf as 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_ - updating store file list. 2024-12-12T01:15:22,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-12T01:15:22,944 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_ into d479e027beb0f7e53a821180da4a387f/cf 2024-12-12T01:15:22,944 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/staging/jenkins__testExportFileSystemStateWithSplitRegion__38ah3l4r8igspqqqsdlt6cdtfq7sl6acl1qmdoesie2vpe6qki83322rkjn1sqbm/cf/test_file into d479e027beb0f7e53a821180da4a387f/cf (new location: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_) 2024-12-12T01:15:22,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/staging/jenkins__testExportFileSystemStateWithSplitRegion__38ah3l4r8igspqqqsdlt6cdtfq7sl6acl1qmdoesie2vpe6qki83322rkjn1sqbm/cf/test_file 2024-12-12T01:15:22,956 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
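
Note: after HStore reports the hfile loaded into d479e027beb0f7e53a821180da4a387f/cf, the rows become visible to ordinary reads. A hypothetical read-back check (table name from the log; the scan limit is arbitrary):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanAfterBulkLoad {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
             ResultScanner scanner = table.getScanner(new Scan().setLimit(10))) {
          // The bulk-loaded hfile spans keys 1\x00\x00\x00 .. 9\x00\x00\x00 (see the tool log above).
          for (Result r : scanner) {
            System.out.println(Bytes.toStringBinary(r.getRow()));
          }
        }
      }
    }
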
2024-12-12T01:15:22,957 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T01:15:22,957 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:22,958 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f., hostname=36236b0d7605,38295,1733966101221, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f., hostname=36236b0d7605,38295,1733966101221, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to 
address=36236b0d7605:38295 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-12T01:15:22,958 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:15:22,959 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:22,959 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-12T01:15:22,959 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f., hostname=36236b0d7605,38295,1733966101221, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-12T01:15:22,959 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f., hostname=36236b0d7605,38295,1733966101221, seqNum=2 from cache 2024-12-12T01:15:22,960 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 2024-12-12T01:15:22,968 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:15:22,981 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. 
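
Note: the HMaster entry above records the client asking to split the single region. The corresponding Admin API call looks roughly like the sketch below; the split key "5" is an assumption based on the row='5' location lookup just before the split request, not something the master log states.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to run a SplitTableRegionProcedure (pid=21 in the entries below).
          admin.split(TableName.valueOf("testExportFileSystemStateWithSplitRegion"),
              Bytes.toBytes("5"));   // assumed split point
        }
      }
    }
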
2024-12-12T01:15:22,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=36236b0d7605,38295,1733966101221 2024-12-12T01:15:23,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d479e027beb0f7e53a821180da4a387f, daughterA=6d1c4885b300985f9d6d359b50722b5d, daughterB=92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,001 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d479e027beb0f7e53a821180da4a387f, daughterA=6d1c4885b300985f9d6d359b50722b5d, daughterB=92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,001 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d479e027beb0f7e53a821180da4a387f, daughterA=6d1c4885b300985f9d6d359b50722b5d, daughterB=92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,001 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d479e027beb0f7e53a821180da4a387f, daughterA=6d1c4885b300985f9d6d359b50722b5d, daughterB=92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-12T01:15:23,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d479e027beb0f7e53a821180da4a387f, UNASSIGN}] 2024-12-12T01:15:23,011 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d479e027beb0f7e53a821180da4a387f, UNASSIGN 2024-12-12T01:15:23,013 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=d479e027beb0f7e53a821180da4a387f, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:15:23,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d479e027beb0f7e53a821180da4a387f, UNASSIGN because future has completed 2024-12-12T01:15:23,018 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-12T01:15:23,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure d479e027beb0f7e53a821180da4a387f, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:15:23,066 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=36236b0d7605:44743 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 34 more 2024-12-12T01:15:23,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-12T01:15:23,185 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:23,185 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-12T01:15:23,186 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing d479e027beb0f7e53a821180da4a387f, disabling compactions & flushes 2024-12-12T01:15:23,186 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. 2024-12-12T01:15:23,186 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. 2024-12-12T01:15:23,186 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. after waiting 0 ms 2024-12-12T01:15:23,186 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. 
2024-12-12T01:15:23,205 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-12-12T01:15:23,210 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:15:23,210 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f. 2024-12-12T01:15:23,211 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for d479e027beb0f7e53a821180da4a387f: Waiting for close lock at 1733966123186Running coprocessor pre-close hooks at 1733966123186Disabling compacts and flushes for region at 1733966123186Disabling writes for close at 1733966123186Writing region close event to WAL at 1733966123192 (+6 ms)Running coprocessor post-close hooks at 1733966123206 (+14 ms)Closed at 1733966123210 (+4 ms) 2024-12-12T01:15:23,215 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:23,216 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=d479e027beb0f7e53a821180da4a387f, regionState=CLOSED 2024-12-12T01:15:23,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure d479e027beb0f7e53a821180da4a387f, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:15:23,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-12T01:15:23,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure d479e027beb0f7e53a821180da4a387f, server=36236b0d7605,38295,1733966101221 in 202 msec 2024-12-12T01:15:23,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-12-12T01:15:23,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=d479e027beb0f7e53a821180da4a387f, UNASSIGN in 215 msec 2024-12-12T01:15:23,243 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:23,246 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=d479e027beb0f7e53a821180da4a387f, threads=1 2024-12-12T01:15:23,249 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_ for region: d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:23,267 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-12T01:15:23,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741860_1036 (size=21) 2024-12-12T01:15:23,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741860_1036 (size=21) 2024-12-12T01:15:23,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741860_1036 (size=21) 2024-12-12T01:15:23,305 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-12T01:15:23,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741861_1037 (size=21) 2024-12-12T01:15:23,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741861_1037 (size=21) 2024-12-12T01:15:23,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741861_1037 (size=21) 2024-12-12T01:15:23,319 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_ for region: d479e027beb0f7e53a821180da4a387f 2024-12-12T01:15:23,321 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region d479e027beb0f7e53a821180da4a387f Daughter A: [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f] storefiles, Daughter B: [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f] storefiles. 
2024-12-12T01:15:23,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-12T01:15:23,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741862_1038 (size=76) 2024-12-12T01:15:23,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741862_1038 (size=76) 2024-12-12T01:15:23,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741862_1038 (size=76) 2024-12-12T01:15:23,344 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:23,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741863_1039 (size=76) 2024-12-12T01:15:23,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741863_1039 (size=76) 2024-12-12T01:15:23,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741863_1039 (size=76) 2024-12-12T01:15:23,376 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:23,392 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-12T01:15:23,396 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-12T01:15:23,401 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733966123400"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733966123400"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733966123400"}]},"ts":"1733966123400"} 2024-12-12T01:15:23,401 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733966123400"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966123400"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733966123400"}]},"ts":"1733966123400"} 2024-12-12T01:15:23,401 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733966123400"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966123400"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733966123400"}]},"ts":"1733966123400"} 2024-12-12T01:15:23,418 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6d1c4885b300985f9d6d359b50722b5d, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=92d957b63c4b0d6beabf921ed8c70c65, ASSIGN}] 2024-12-12T01:15:23,420 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6d1c4885b300985f9d6d359b50722b5d, ASSIGN 2024-12-12T01:15:23,420 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=92d957b63c4b0d6beabf921ed8c70c65, ASSIGN 2024-12-12T01:15:23,421 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6d1c4885b300985f9d6d359b50722b5d, ASSIGN; state=SPLITTING_NEW, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:15:23,422 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=92d957b63c4b0d6beabf921ed8c70c65, ASSIGN; state=SPLITTING_NEW, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:15:23,572 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-12T01:15:23,573 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=6d1c4885b300985f9d6d359b50722b5d, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:15:23,573 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=92d957b63c4b0d6beabf921ed8c70c65, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:15:23,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6d1c4885b300985f9d6d359b50722b5d, ASSIGN because future has completed 2024-12-12T01:15:23,576 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6d1c4885b300985f9d6d359b50722b5d, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:15:23,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=92d957b63c4b0d6beabf921ed8c70c65, ASSIGN because future has completed 2024-12-12T01:15:23,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 92d957b63c4b0d6beabf921ed8c70c65, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:15:23,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-12T01:15:23,736 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d. 2024-12-12T01:15:23,736 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => 6d1c4885b300985f9d6d359b50722b5d, NAME => 'testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d.', STARTKEY => '', ENDKEY => '5'} 2024-12-12T01:15:23,737 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d. service=AccessControlService 2024-12-12T01:15:23,737 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:15:23,737 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:23,737 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:23,737 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:23,737 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:23,740 INFO [StoreOpener-6d1c4885b300985f9d6d359b50722b5d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:23,741 INFO [StoreOpener-6d1c4885b300985f9d6d359b50722b5d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6d1c4885b300985f9d6d359b50722b5d columnFamilyName cf 2024-12-12T01:15:23,741 DEBUG [StoreOpener-6d1c4885b300985f9d6d359b50722b5d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:23,755 DEBUG [StoreFileOpener-6d1c4885b300985f9d6d359b50722b5d-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f: NONE, but ROW specified in column family configuration 2024-12-12T01:15:23,776 DEBUG [StoreOpener-6d1c4885b300985f9d6d359b50722b5d-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f->hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_-bottom 2024-12-12T01:15:23,777 INFO [StoreOpener-6d1c4885b300985f9d6d359b50722b5d-1 {}] regionserver.HStore(327): Store=6d1c4885b300985f9d6d359b50722b5d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:15:23,777 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:23,779 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:23,781 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:23,782 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:23,782 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:23,786 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:23,787 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened 6d1c4885b300985f9d6d359b50722b5d; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70169773, jitterRate=0.045611098408699036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:15:23,787 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:23,788 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for 6d1c4885b300985f9d6d359b50722b5d: Running coprocessor pre-open hook at 1733966123738Writing region info on filesystem at 1733966123738Initializing all the Stores at 1733966123739 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966123739Cleaning up temporary data from old regions at 1733966123782 (+43 ms)Running coprocessor post-open hooks at 1733966123787 (+5 ms)Region opened successfully at 1733966123788 (+1 ms) 2024-12-12T01:15:23,790 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d., pid=26, masterSystemTime=1733966123730 2024-12-12T01:15:23,791 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] 
regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d.,because compaction is disabled. 2024-12-12T01:15:23,794 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d. 2024-12-12T01:15:23,795 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d. 2024-12-12T01:15:23,795 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65. 2024-12-12T01:15:23,796 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => 92d957b63c4b0d6beabf921ed8c70c65, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65.', STARTKEY => '5', ENDKEY => ''} 2024-12-12T01:15:23,796 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=6d1c4885b300985f9d6d359b50722b5d, regionState=OPEN, openSeqNum=7, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:15:23,796 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65. service=AccessControlService 2024-12-12T01:15:23,796 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:15:23,797 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,797 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:15:23,797 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,797 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,800 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6d1c4885b300985f9d6d359b50722b5d, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:15:23,800 INFO [StoreOpener-92d957b63c4b0d6beabf921ed8c70c65-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,802 INFO [StoreOpener-92d957b63c4b0d6beabf921ed8c70c65-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92d957b63c4b0d6beabf921ed8c70c65 columnFamilyName cf 2024-12-12T01:15:23,802 DEBUG [StoreOpener-92d957b63c4b0d6beabf921ed8c70c65-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:23,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=24 2024-12-12T01:15:23,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure 6d1c4885b300985f9d6d359b50722b5d, server=36236b0d7605,38295,1733966101221 in 226 msec 2024-12-12T01:15:23,811 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6d1c4885b300985f9d6d359b50722b5d, ASSIGN in 388 msec 2024-12-12T01:15:23,827 DEBUG [StoreFileOpener-92d957b63c4b0d6beabf921ed8c70c65-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom 
filter type for 329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f: NONE, but ROW specified in column family configuration 2024-12-12T01:15:23,830 DEBUG [StoreOpener-92d957b63c4b0d6beabf921ed8c70c65-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f->hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_-top 2024-12-12T01:15:23,830 INFO [StoreOpener-92d957b63c4b0d6beabf921ed8c70c65-1 {}] regionserver.HStore(327): Store=92d957b63c4b0d6beabf921ed8c70c65/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:15:23,830 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,832 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,834 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,835 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,835 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,838 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,839 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened 92d957b63c4b0d6beabf921ed8c70c65; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72453297, jitterRate=0.07963825762271881}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:15:23,840 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:23,840 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for 92d957b63c4b0d6beabf921ed8c70c65: Running coprocessor pre-open hook at 1733966123797Writing region info on filesystem at 1733966123797Initializing all the Stores at 1733966123799 (+2 ms)Instantiating store for column 
family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966123799Cleaning up temporary data from old regions at 1733966123835 (+36 ms)Running coprocessor post-open hooks at 1733966123840 (+5 ms)Region opened successfully at 1733966123840 2024-12-12T01:15:23,841 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65., pid=27, masterSystemTime=1733966123730 2024-12-12T01:15:23,841 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65.,because compaction is disabled. 2024-12-12T01:15:23,848 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65. 2024-12-12T01:15:23,848 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65. 2024-12-12T01:15:23,848 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=92d957b63c4b0d6beabf921ed8c70c65, regionState=OPEN, openSeqNum=7, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:15:23,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 92d957b63c4b0d6beabf921ed8c70c65, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:15:23,863 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-12-12T01:15:23,864 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure 92d957b63c4b0d6beabf921ed8c70c65, server=36236b0d7605,38295,1733966101221 in 279 msec 2024-12-12T01:15:23,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-12-12T01:15:23,867 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=92d957b63c4b0d6beabf921ed8c70c65, ASSIGN in 444 msec 2024-12-12T01:15:23,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=d479e027beb0f7e53a821180da4a387f, daughterA=6d1c4885b300985f9d6d359b50722b5d, daughterB=92d957b63c4b0d6beabf921ed8c70c65 in 874 msec 2024-12-12T01:15:24,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-12T01:15:24,145 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-12T01:15:24,145 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-12T01:15:24,152 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-12T01:15:24,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966124152 (current time:1733966124152). 2024-12-12T01:15:24,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:15:24,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-12T01:15:24,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:15:24,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71d15ac0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:24,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:15:24,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:15:24,156 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:15:24,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:15:24,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:15:24,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a5c9dd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:24,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:15:24,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:15:24,157 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:24,158 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56832, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:15:24,159 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29dac372, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:24,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:15:24,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:15:24,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:24,162 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46862, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:15:24,164 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:15:24,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:15:24,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:24,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:24,164 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:15:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dda0d8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:15:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:15:24,166 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:15:24,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:15:24,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:15:24,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a60ed94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:24,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:15:24,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:15:24,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:24,169 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56848, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:15:24,170 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1708c580, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:15:24,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:15:24,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:15:24,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:15:24,173 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46868, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-12T01:15:24,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:15:24,178 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:15:24,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:15:24,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:24,178 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:15:24,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:15:24,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-12T01:15:24,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:15:24,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-12T01:15:24,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-12T01:15:24,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-12T01:15:24,183 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:15:24,184 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:15:24,189 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:15:24,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741864_1040 (size=197) 2024-12-12T01:15:24,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741864_1040 (size=197) 2024-12-12T01:15:24,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741864_1040 (size=197) 2024-12-12T01:15:24,214 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:15:24,215 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure 6d1c4885b300985f9d6d359b50722b5d}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 92d957b63c4b0d6beabf921ed8c70c65}] 2024-12-12T01:15:24,216 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:24,217 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:24,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-12T01:15:24,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-12T01:15:24,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65. 2024-12-12T01:15:24,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-12T01:15:24,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for 92d957b63c4b0d6beabf921ed8c70c65: 2024-12-12T01:15:24,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-12T01:15:24,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d. 2024-12-12T01:15:24,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:24,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for 6d1c4885b300985f9d6d359b50722b5d: 2024-12-12T01:15:24,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:15:24,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d. for snapshot-testExportFileSystemStateWithSplitRegion completed. 
2024-12-12T01:15:24,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f->hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_-top] hfiles 2024-12-12T01:15:24,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:24,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:24,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:15:24,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f->hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_-bottom] hfiles 2024-12-12T01:15:24,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:24,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741866_1042 (size=182) 2024-12-12T01:15:24,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741866_1042 (size=182) 2024-12-12T01:15:24,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741866_1042 (size=182) 2024-12-12T01:15:24,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741865_1041 
(size=182) 2024-12-12T01:15:24,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741865_1041 (size=182) 2024-12-12T01:15:24,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d. 2024-12-12T01:15:24,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-12T01:15:24,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-12T01:15:24,393 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:24,393 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:15:24,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741865_1041 (size=182) 2024-12-12T01:15:24,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6d1c4885b300985f9d6d359b50722b5d in 179 msec 2024-12-12T01:15:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-12T01:15:24,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65. 
2024-12-12T01:15:24,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-12T01:15:24,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-12T01:15:24,794 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:24,794 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:15:24,803 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=28 2024-12-12T01:15:24,803 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 92d957b63c4b0d6beabf921ed8c70c65 in 583 msec 2024-12-12T01:15:24,804 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:15:24,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-12T01:15:24,809 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-12T01:15:24,809 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-12T01:15:24,810 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:15:24,811 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_] hfiles 2024-12-12T01:15:24,811 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_ 2024-12-12T01:15:24,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741867_1043 (size=129) 2024-12-12T01:15:24,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741867_1043 (size=129) 2024-12-12T01:15:24,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741867_1043 (size=129) 2024-12-12T01:15:24,832 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => d479e027beb0f7e53a821180da4a387f, NAME => 'testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:24,834 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:15:24,835 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:15:24,835 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:24,836 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:24,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741868_1044 (size=891) 2024-12-12T01:15:24,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741868_1044 (size=891) 2024-12-12T01:15:24,880 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741868_1044 (size=891) 2024-12-12T01:15:24,883 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:15:24,912 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:15:24,913 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:24,916 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:15:24,916 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-12T01:15:24,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 736 msec 2024-12-12T01:15:25,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-12T01:15:25,316 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-12T01:15:25,317 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966125317 2024-12-12T01:15:25,317 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33353, tgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966125317, rawTgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966125317, srcFsUri=hdfs://localhost:33353, srcDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:15:25,415 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33353, 
inputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:15:25,415 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966125317, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966125317/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:25,422 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-12T01:15:25,437 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966125317/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:15:25,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741869_1045 (size=891) 2024-12-12T01:15:25,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741869_1045 (size=891) 2024-12-12T01:15:25,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741869_1045 (size=891) 2024-12-12T01:15:25,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741870_1046 (size=197) 2024-12-12T01:15:25,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741870_1046 (size=197) 2024-12-12T01:15:25,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741870_1046 (size=197) 2024-12-12T01:15:25,616 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:15:25,618 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:15:25,619 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:15:26,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-17754113328189950261.jar 2024-12-12T01:15:26,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:15:26,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:15:27,070 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-11071856210596861656.jar 2024-12-12T01:15:27,071 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:15:27,072 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:15:27,072 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:15:27,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:15:27,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:15:27,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:15:27,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T01:15:27,075 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T01:15:27,075 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T01:15:27,076 DEBUG [Time-limited 
test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T01:15:27,076 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T01:15:27,077 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T01:15:27,077 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T01:15:27,078 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T01:15:27,078 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T01:15:27,079 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T01:15:27,079 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T01:15:27,085 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:15:27,086 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:15:27,086 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:15:27,087 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-12T01:15:27,087 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:15:27,088 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:15:27,088 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:15:27,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741871_1047 (size=131440) 2024-12-12T01:15:27,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741871_1047 (size=131440) 2024-12-12T01:15:27,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741871_1047 (size=131440) 2024-12-12T01:15:27,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741872_1048 (size=4188619) 2024-12-12T01:15:27,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741872_1048 (size=4188619) 2024-12-12T01:15:27,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741872_1048 (size=4188619) 2024-12-12T01:15:27,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741873_1049 (size=1323991) 2024-12-12T01:15:27,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741873_1049 (size=1323991) 2024-12-12T01:15:27,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741873_1049 (size=1323991) 2024-12-12T01:15:27,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741874_1050 (size=903541) 2024-12-12T01:15:27,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741874_1050 (size=903541) 2024-12-12T01:15:27,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741874_1050 (size=903541) 2024-12-12T01:15:27,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741875_1051 (size=8360360) 2024-12-12T01:15:27,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741875_1051 (size=8360360) 2024-12-12T01:15:27,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42945 is added to blk_1073741875_1051 (size=8360360) 2024-12-12T01:15:27,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741876_1052 (size=1877034) 2024-12-12T01:15:27,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741876_1052 (size=1877034) 2024-12-12T01:15:27,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741876_1052 (size=1877034) 2024-12-12T01:15:27,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741877_1053 (size=6426224) 2024-12-12T01:15:27,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741877_1053 (size=6426224) 2024-12-12T01:15:27,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741877_1053 (size=6426224) 2024-12-12T01:15:27,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741878_1054 (size=77835) 2024-12-12T01:15:27,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741878_1054 (size=77835) 2024-12-12T01:15:27,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741878_1054 (size=77835) 2024-12-12T01:15:27,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741879_1055 (size=30949) 2024-12-12T01:15:27,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741879_1055 (size=30949) 2024-12-12T01:15:27,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741879_1055 (size=30949) 2024-12-12T01:15:27,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741880_1056 (size=1597213) 2024-12-12T01:15:27,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741880_1056 (size=1597213) 2024-12-12T01:15:27,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741880_1056 (size=1597213) 2024-12-12T01:15:27,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741881_1057 (size=4695811) 2024-12-12T01:15:27,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741881_1057 (size=4695811) 2024-12-12T01:15:27,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741881_1057 (size=4695811) 2024-12-12T01:15:27,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741882_1058 (size=232957) 2024-12-12T01:15:27,871 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741882_1058 (size=232957) 2024-12-12T01:15:27,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741882_1058 (size=232957) 2024-12-12T01:15:27,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741883_1059 (size=127628) 2024-12-12T01:15:27,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741883_1059 (size=127628) 2024-12-12T01:15:27,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741883_1059 (size=127628) 2024-12-12T01:15:27,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741884_1060 (size=20406) 2024-12-12T01:15:27,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741884_1060 (size=20406) 2024-12-12T01:15:27,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741884_1060 (size=20406) 2024-12-12T01:15:27,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741885_1061 (size=5175431) 2024-12-12T01:15:27,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741885_1061 (size=5175431) 2024-12-12T01:15:27,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741885_1061 (size=5175431) 2024-12-12T01:15:27,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741886_1062 (size=217634) 2024-12-12T01:15:27,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741886_1062 (size=217634) 2024-12-12T01:15:27,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741886_1062 (size=217634) 2024-12-12T01:15:28,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741887_1063 (size=443172) 2024-12-12T01:15:28,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741887_1063 (size=443172) 2024-12-12T01:15:28,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741887_1063 (size=443172) 2024-12-12T01:15:28,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741888_1064 (size=1832290) 2024-12-12T01:15:28,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741888_1064 (size=1832290) 2024-12-12T01:15:28,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741888_1064 (size=1832290) 2024-12-12T01:15:28,147 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741889_1065 (size=322274) 2024-12-12T01:15:28,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741889_1065 (size=322274) 2024-12-12T01:15:28,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741889_1065 (size=322274) 2024-12-12T01:15:28,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741890_1066 (size=503880) 2024-12-12T01:15:28,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741890_1066 (size=503880) 2024-12-12T01:15:28,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741890_1066 (size=503880) 2024-12-12T01:15:28,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741891_1067 (size=29229) 2024-12-12T01:15:28,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741891_1067 (size=29229) 2024-12-12T01:15:28,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741891_1067 (size=29229) 2024-12-12T01:15:28,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741892_1068 (size=24096) 2024-12-12T01:15:28,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741892_1068 (size=24096) 2024-12-12T01:15:28,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741892_1068 (size=24096) 2024-12-12T01:15:28,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741893_1069 (size=111872) 2024-12-12T01:15:28,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741893_1069 (size=111872) 2024-12-12T01:15:28,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741893_1069 (size=111872) 2024-12-12T01:15:28,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741894_1070 (size=45609) 2024-12-12T01:15:28,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741894_1070 (size=45609) 2024-12-12T01:15:28,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741894_1070 (size=45609) 2024-12-12T01:15:28,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741895_1071 (size=136454) 2024-12-12T01:15:28,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741895_1071 (size=136454) 2024-12-12T01:15:28,363 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741895_1071 (size=136454) 2024-12-12T01:15:28,369 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-12T01:15:28,376 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-12T01:15:28,384 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=d479e027beb0f7e53a821180da4a387f-329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_. 2024-12-12T01:15:28,384 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=d479e027beb0f7e53a821180da4a387f-329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_. 2024-12-12T01:15:28,385 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-12T01:15:28,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741896_1072 (size=244) 2024-12-12T01:15:28,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741896_1072 (size=244) 2024-12-12T01:15:28,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741896_1072 (size=244) 2024-12-12T01:15:28,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741897_1073 (size=17) 2024-12-12T01:15:28,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741897_1073 (size=17) 2024-12-12T01:15:28,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741897_1073 (size=17) 2024-12-12T01:15:28,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741898_1074 (size=304058) 2024-12-12T01:15:28,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741898_1074 (size=304058) 2024-12-12T01:15:28,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741898_1074 (size=304058) 2024-12-12T01:15:28,714 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:15:28,907 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:15:28,961 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:15:28,962 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T01:15:29,447 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0001_000001 (auth:SIMPLE) from 127.0.0.1:47328 2024-12-12T01:15:38,287 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0001_000001 (auth:SIMPLE) from 127.0.0.1:39238 2024-12-12T01:15:38,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741899_1075 (size=349756) 2024-12-12T01:15:38,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741899_1075 (size=349756) 2024-12-12T01:15:38,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741899_1075 (size=349756) 2024-12-12T01:15:40,681 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0001_000001 (auth:SIMPLE) from 127.0.0.1:55686 2024-12-12T01:15:46,962 INFO [master/36236b0d7605:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-12T01:15:46,962 INFO [master/36236b0d7605:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-12T01:15:56,952 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 83614ff2025743f7080c6ab5c0ee90ed, had cached 0 bytes from a total of 8190 2024-12-12T01:15:56,953 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1e4a47b0b955b8e01f81d0da6b552a65, had cached 0 bytes from a total of 5422 2024-12-12T01:15:58,908 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-12T01:16:04,731 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 83614ff2025743f7080c6ab5c0ee90ed changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:16:04,731 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1e4a47b0b955b8e01f81d0da6b552a65 changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:16:04,732 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region a76413eb65b7bad8091ed755bedda4fd changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:16:08,737 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6d1c4885b300985f9d6d359b50722b5d, had cached 0 bytes from a total of 320414712 2024-12-12T01:16:08,797 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 92d957b63c4b0d6beabf921ed8c70c65, had cached 0 bytes from a total of 320414712 2024-12-12T01:16:25,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741900_1076 (size=134217728) 2024-12-12T01:16:25,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741900_1076 (size=134217728) 2024-12-12T01:16:25,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741900_1076 (size=134217728) 2024-12-12T01:16:28,908 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:16:41,953 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 83614ff2025743f7080c6ab5c0ee90ed, had cached 0 bytes from a total of 8190 2024-12-12T01:16:41,953 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1e4a47b0b955b8e01f81d0da6b552a65, had cached 0 bytes from a total of 5422 2024-12-12T01:16:53,738 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6d1c4885b300985f9d6d359b50722b5d, had cached 0 bytes from a total of 320414712 2024-12-12T01:16:53,797 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 92d957b63c4b0d6beabf921ed8c70c65, had cached 0 bytes from a total of 320414712 2024-12-12T01:16:58,908 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-12T01:17:03,420 WARN [regionserver/36236b0d7605:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 2, running: 1 2024-12-12T01:17:04,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741901_1077 (size=134217728) 2024-12-12T01:17:04,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741901_1077 (size=134217728) 2024-12-12T01:17:04,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741901_1077 (size=134217728) 2024-12-12T01:17:18,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741902_1078 (size=51979256) 2024-12-12T01:17:18,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741902_1078 (size=51979256) 2024-12-12T01:17:18,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741902_1078 (size=51979256) 2024-12-12T01:17:18,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741903_1079 (size=17520) 2024-12-12T01:17:18,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741903_1079 (size=17520) 2024-12-12T01:17:18,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741903_1079 (size=17520) 2024-12-12T01:17:18,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741904_1080 (size=482) 2024-12-12T01:17:18,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741904_1080 (size=482) 2024-12-12T01:17:18,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741904_1080 (size=482) 2024-12-12T01:17:18,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741905_1081 (size=17520) 2024-12-12T01:17:18,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741905_1081 (size=17520) 2024-12-12T01:17:18,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741905_1081 (size=17520) 2024-12-12T01:17:18,742 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0001/container_1733966109028_0001_01_000002/launch_container.sh] 2024-12-12T01:17:18,742 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0001/container_1733966109028_0001_01_000002/container_tokens] 2024-12-12T01:17:18,742 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0001/container_1733966109028_0001_01_000002/sysfs] 2024-12-12T01:17:18,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741906_1082 (size=349756) 2024-12-12T01:17:18,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741906_1082 (size=349756) 2024-12-12T01:17:18,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741906_1082 (size=349756) 2024-12-12T01:17:18,779 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0001_000001 (auth:SIMPLE) from 127.0.0.1:38556 2024-12-12T01:17:20,709 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-12T01:17:20,711 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-12T01:17:20,721 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:20,721 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-12T01:17:20,722 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-12T01:17:20,722 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:20,723 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-12T01:17:20,723 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-12T01:17:20,723 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966125317/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966125317/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:20,723 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966125317/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-12T01:17:20,723 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966125317/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-12T01:17:20,743 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:20,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:20,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-12T01:17:20,755 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966240755"}]},"ts":"1733966240755"} 2024-12-12T01:17:20,758 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-12T01:17:20,758 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-12T01:17:20,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-12T01:17:20,767 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6d1c4885b300985f9d6d359b50722b5d, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=92d957b63c4b0d6beabf921ed8c70c65, UNASSIGN}] 2024-12-12T01:17:20,768 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6d1c4885b300985f9d6d359b50722b5d, UNASSIGN 2024-12-12T01:17:20,769 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=92d957b63c4b0d6beabf921ed8c70c65, UNASSIGN 2024-12-12T01:17:20,771 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=6d1c4885b300985f9d6d359b50722b5d, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 
2024-12-12T01:17:20,771 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=92d957b63c4b0d6beabf921ed8c70c65, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:17:20,773 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6d1c4885b300985f9d6d359b50722b5d, UNASSIGN because future has completed 2024-12-12T01:17:20,774 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:17:20,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6d1c4885b300985f9d6d359b50722b5d, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:17:20,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=92d957b63c4b0d6beabf921ed8c70c65, UNASSIGN because future has completed 2024-12-12T01:17:20,776 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:17:20,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 92d957b63c4b0d6beabf921ed8c70c65, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:17:20,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-12T01:17:20,928 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:17:20,928 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:17:20,928 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 6d1c4885b300985f9d6d359b50722b5d, disabling compactions & flushes 2024-12-12T01:17:20,928 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d. 2024-12-12T01:17:20,928 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d. 2024-12-12T01:17:20,928 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d. 
after waiting 0 ms 2024-12-12T01:17:20,928 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d. 2024-12-12T01:17:20,935 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-12T01:17:20,936 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:17:20,936 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d. 2024-12-12T01:17:20,936 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 6d1c4885b300985f9d6d359b50722b5d: Waiting for close lock at 1733966240928Running coprocessor pre-close hooks at 1733966240928Disabling compacts and flushes for region at 1733966240928Disabling writes for close at 1733966240928Writing region close event to WAL at 1733966240929 (+1 ms)Running coprocessor post-close hooks at 1733966240936 (+7 ms)Closed at 1733966240936 2024-12-12T01:17:20,938 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:17:20,938 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:17:20,938 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:17:20,939 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing 92d957b63c4b0d6beabf921ed8c70c65, disabling compactions & flushes 2024-12-12T01:17:20,939 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65. 2024-12-12T01:17:20,939 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65. 2024-12-12T01:17:20,939 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65. after waiting 0 ms 2024-12-12T01:17:20,939 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65. 
2024-12-12T01:17:20,939 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=6d1c4885b300985f9d6d359b50722b5d, regionState=CLOSED 2024-12-12T01:17:20,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6d1c4885b300985f9d6d359b50722b5d, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:17:20,945 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-12T01:17:20,946 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:17:20,946 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65. 2024-12-12T01:17:20,946 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-12-12T01:17:20,946 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure 6d1c4885b300985f9d6d359b50722b5d, server=36236b0d7605,38295,1733966101221 in 169 msec 2024-12-12T01:17:20,947 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for 92d957b63c4b0d6beabf921ed8c70c65: Waiting for close lock at 1733966240939Running coprocessor pre-close hooks at 1733966240939Disabling compacts and flushes for region at 1733966240939Disabling writes for close at 1733966240939Writing region close event to WAL at 1733966240940 (+1 ms)Running coprocessor post-close hooks at 1733966240946 (+6 ms)Closed at 1733966240946 2024-12-12T01:17:20,948 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=6d1c4885b300985f9d6d359b50722b5d, UNASSIGN in 179 msec 2024-12-12T01:17:20,949 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed 92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:17:20,950 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=92d957b63c4b0d6beabf921ed8c70c65, regionState=CLOSED 2024-12-12T01:17:20,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 92d957b63c4b0d6beabf921ed8c70c65, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:17:20,955 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=34 2024-12-12T01:17:20,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 92d957b63c4b0d6beabf921ed8c70c65, 
server=36236b0d7605,38295,1733966101221 in 176 msec 2024-12-12T01:17:20,958 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-12-12T01:17:20,958 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=92d957b63c4b0d6beabf921ed8c70c65, UNASSIGN in 189 msec 2024-12-12T01:17:20,961 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-12T01:17:20,961 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 199 msec 2024-12-12T01:17:20,963 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966240963"}]},"ts":"1733966240963"} 2024-12-12T01:17:20,965 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-12T01:17:20,965 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-12T01:17:20,968 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 220 msec 2024-12-12T01:17:21,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-12T01:17:21,075 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-12T01:17:21,079 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,087 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,089 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,093 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50335, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-12T01:17:21,094 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl 
entry testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,099 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-12T01:17:21,099 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-12T01:17:21,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:21,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:21,100 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f 2024-12-12T01:17:21,101 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-12T01:17:21,101 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-12T01:17:21,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 
2024-12-12T01:17:21,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:21,101 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-12T01:17:21,101 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-12T01:17:21,101 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:17:21,102 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:17:21,102 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:21,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-12T01:17:21,103 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:21,103 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:21,103 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:21,108 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65/recovered.edits] 2024-12-12T01:17:21,108 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d/recovered.edits] 
2024-12-12T01:17:21,108 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/recovered.edits] 2024-12-12T01:17:21,117 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f 2024-12-12T01:17:21,117 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_ to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_ 2024-12-12T01:17:21,118 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d/cf/329ae755b0ce4ddcb7831c6b4f21f9a8_SeqId_4_.d479e027beb0f7e53a821180da4a387f 2024-12-12T01:17:21,122 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65/recovered.edits/10.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65/recovered.edits/10.seqid 2024-12-12T01:17:21,122 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/recovered.edits/6.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f/recovered.edits/6.seqid 2024-12-12T01:17:21,122 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d/recovered.edits/10.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d/recovered.edits/10.seqid 2024-12-12T01:17:21,122 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/92d957b63c4b0d6beabf921ed8c70c65 2024-12-12T01:17:21,123 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/d479e027beb0f7e53a821180da4a387f 2024-12-12T01:17:21,123 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportFileSystemStateWithSplitRegion/6d1c4885b300985f9d6d359b50722b5d 2024-12-12T01:17:21,123 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-12T01:17:21,126 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44743 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-12T01:17:21,138 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-12T01:17:21,142 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-12T01:17:21,144 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,144 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 
2024-12-12T01:17:21,145 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966241145"}]},"ts":"9223372036854775807"} 2024-12-12T01:17:21,145 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966241145"}]},"ts":"9223372036854775807"} 2024-12-12T01:17:21,145 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966241145"}]},"ts":"9223372036854775807"} 2024-12-12T01:17:21,149 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-12-12T01:17:21,149 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d479e027beb0f7e53a821180da4a387f, NAME => 'testExportFileSystemStateWithSplitRegion,,1733966116384.d479e027beb0f7e53a821180da4a387f.', STARTKEY => '', ENDKEY => ''}, {ENCODED => 6d1c4885b300985f9d6d359b50722b5d, NAME => 'testExportFileSystemStateWithSplitRegion,,1733966122992.6d1c4885b300985f9d6d359b50722b5d.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => 92d957b63c4b0d6beabf921ed8c70c65, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733966122992.92d957b63c4b0d6beabf921ed8c70c65.', STARTKEY => '5', ENDKEY => ''}] 2024-12-12T01:17:21,149 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 
2024-12-12T01:17:21,149 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966241149"}]},"ts":"9223372036854775807"} 2024-12-12T01:17:21,152 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-12T01:17:21,153 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 73 msec 2024-12-12T01:17:21,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-12T01:17:21,205 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,206 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-12T01:17:21,207 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-12T01:17:21,211 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966241211"}]},"ts":"1733966241211"} 2024-12-12T01:17:21,213 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-12T01:17:21,213 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-12T01:17:21,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-12T01:17:21,217 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1e4a47b0b955b8e01f81d0da6b552a65, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=83614ff2025743f7080c6ab5c0ee90ed, UNASSIGN}] 2024-12-12T01:17:21,218 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1e4a47b0b955b8e01f81d0da6b552a65, UNASSIGN 2024-12-12T01:17:21,218 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=83614ff2025743f7080c6ab5c0ee90ed, UNASSIGN 2024-12-12T01:17:21,219 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=1e4a47b0b955b8e01f81d0da6b552a65, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:17:21,219 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=83614ff2025743f7080c6ab5c0ee90ed, regionState=CLOSING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:17:21,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1e4a47b0b955b8e01f81d0da6b552a65, UNASSIGN because future has completed 2024-12-12T01:17:21,222 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:17:21,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:17:21,223 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=83614ff2025743f7080c6ab5c0ee90ed, UNASSIGN because future has completed 2024-12-12T01:17:21,228 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:17:21,228 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:17:21,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-12T01:17:21,376 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:17:21,376 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:17:21,377 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 1e4a47b0b955b8e01f81d0da6b552a65, disabling compactions & flushes 2024-12-12T01:17:21,377 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region 
testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 2024-12-12T01:17:21,377 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 2024-12-12T01:17:21,377 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. after waiting 0 ms 2024-12-12T01:17:21,377 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 2024-12-12T01:17:21,381 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51269, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T01:17:21,384 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:17:21,384 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:17:21,385 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing 83614ff2025743f7080c6ab5c0ee90ed, disabling compactions & flushes 2024-12-12T01:17:21,385 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 2024-12-12T01:17:21,385 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 2024-12-12T01:17:21,385 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. after waiting 0 ms 2024-12-12T01:17:21,385 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 
2024-12-12T01:17:21,386 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:17:21,387 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:17:21,387 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65. 2024-12-12T01:17:21,387 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 1e4a47b0b955b8e01f81d0da6b552a65: Waiting for close lock at 1733966241376Running coprocessor pre-close hooks at 1733966241376Disabling compacts and flushes for region at 1733966241377 (+1 ms)Disabling writes for close at 1733966241377Writing region close event to WAL at 1733966241378 (+1 ms)Running coprocessor post-close hooks at 1733966241386 (+8 ms)Closed at 1733966241387 (+1 ms) 2024-12-12T01:17:21,389 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:17:21,390 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=1e4a47b0b955b8e01f81d0da6b552a65, regionState=CLOSED 2024-12-12T01:17:21,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:17:21,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=40 2024-12-12T01:17:21,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 1e4a47b0b955b8e01f81d0da6b552a65, server=36236b0d7605,38295,1733966101221 in 170 msec 2024-12-12T01:17:21,398 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:17:21,399 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:17:21,400 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed. 
2024-12-12T01:17:21,400 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for 83614ff2025743f7080c6ab5c0ee90ed: Waiting for close lock at 1733966241384Running coprocessor pre-close hooks at 1733966241384Disabling compacts and flushes for region at 1733966241385 (+1 ms)Disabling writes for close at 1733966241385Writing region close event to WAL at 1733966241387 (+2 ms)Running coprocessor post-close hooks at 1733966241399 (+12 ms)Closed at 1733966241399 2024-12-12T01:17:21,401 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1e4a47b0b955b8e01f81d0da6b552a65, UNASSIGN in 181 msec 2024-12-12T01:17:21,405 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed 83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:17:21,406 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=83614ff2025743f7080c6ab5c0ee90ed, regionState=CLOSED 2024-12-12T01:17:21,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:17:21,418 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=41 2024-12-12T01:17:21,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure 83614ff2025743f7080c6ab5c0ee90ed, server=36236b0d7605,33883,1733966101479 in 187 msec 2024-12-12T01:17:21,420 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=41, resume processing ppid=39 2024-12-12T01:17:21,420 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=83614ff2025743f7080c6ab5c0ee90ed, UNASSIGN in 201 msec 2024-12-12T01:17:21,423 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-12T01:17:21,423 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 206 msec 2024-12-12T01:17:21,425 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966241424"}]},"ts":"1733966241424"} 2024-12-12T01:17:21,427 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-12T01:17:21,427 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-12T01:17:21,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 221 msec 2024-12-12T01:17:21,525 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-12T01:17:21,526 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-12T01:17:21,527 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,532 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,535 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,539 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,541 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:17:21,541 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:17:21,544 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/recovered.edits] 2024-12-12T01:17:21,544 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/recovered.edits] 2024-12-12T01:17:21,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,547 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-12T01:17:21,547 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-12T01:17:21,547 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-12T01:17:21,547 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-12T01:17:21,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:21,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:21,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:21,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:21,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-12T01:17:21,553 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/cf/23c746c74b7740b693d3925258dcffad to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/cf/23c746c74b7740b693d3925258dcffad 2024-12-12T01:17:21,553 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/cf/061716702899460e97581ebc7f065e27 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/cf/061716702899460e97581ebc7f065e27 2024-12-12T01:17:21,559 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65/recovered.edits/9.seqid 2024-12-12T01:17:21,559 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed/recovered.edits/9.seqid 2024-12-12T01:17:21,560 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/1e4a47b0b955b8e01f81d0da6b552a65 2024-12-12T01:17:21,561 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSplitRegion/83614ff2025743f7080c6ab5c0ee90ed 2024-12-12T01:17:21,561 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-12-12T01:17:21,564 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, 
state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,568 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-12T01:17:21,572 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-12T01:17:21,574 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,574 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-12-12T01:17:21,574 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966241574"}]},"ts":"9223372036854775807"} 2024-12-12T01:17:21,574 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966241574"}]},"ts":"9223372036854775807"} 2024-12-12T01:17:21,577 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-12T01:17:21,577 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 1e4a47b0b955b8e01f81d0da6b552a65, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733966111411.1e4a47b0b955b8e01f81d0da6b552a65.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 83614ff2025743f7080c6ab5c0ee90ed, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733966111411.83614ff2025743f7080c6ab5c0ee90ed.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T01:17:21,578 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 
2024-12-12T01:17:21,578 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966241578"}]},"ts":"9223372036854775807"} 2024-12-12T01:17:21,581 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-12-12T01:17:21,582 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,584 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 55 msec 2024-12-12T01:17:21,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-12T01:17:21,655 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,656 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-12T01:17:21,675 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-12T01:17:21,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,681 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-12T01:17:21,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,689 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-12T01:17:21,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:21,751 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=758 (was 715) Potentially hanging thread: RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1367 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2009853946_1 at /127.0.0.1:50554 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/36236b0d7605:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:50574 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/36236b0d7605:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2009853946_1 at /127.0.0.1:54678 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:43553 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43553 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:44400 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) 
app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 16571) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/36236b0d7605:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/36236b0d7605:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:54708 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/36236b0d7605:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=767 (was 757) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=517 (was 601), ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=5400 (was 10038) 2024-12-12T01:17:21,752 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=758 is superior to 500 2024-12-12T01:17:21,781 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=758, OpenFileDescriptor=767, MaxFileDescriptor=1048576, SystemLoadAverage=517, ProcessCount=17, AvailableMemoryMB=5391 2024-12-12T01:17:21,781 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=758 is superior to 500 2024-12-12T01:17:21,783 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:17:21,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-12T01:17:21,790 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:17:21,790 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:17:21,792 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:17:21,793 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-12T01:17:21,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-12T01:17:21,827 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741907_1083 (size=406) 2024-12-12T01:17:21,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741907_1083 (size=406) 2024-12-12T01:17:21,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741907_1083 (size=406) 2024-12-12T01:17:21,833 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b3b34f604318f19ca9be86928446a86d, NAME => 'testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:17:21,845 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => eff4f35cfe33910ee74fcb87369073e0, NAME => 'testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:17:21,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741908_1084 (size=67) 2024-12-12T01:17:21,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741908_1084 (size=67) 2024-12-12T01:17:21,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741908_1084 (size=67) 2024-12-12T01:17:21,880 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:17:21,881 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing b3b34f604318f19ca9be86928446a86d, disabling compactions & flushes 2024-12-12T01:17:21,881 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 
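For reference, the DDL that the master logs above (a 'testtb-testExportWithTargetName' table with REGION_REPLICATION => '1', a single 'cf' family with VERSIONS => '1', and two regions split at '1') could be issued from a plain client roughly as follows. This is a minimal sketch against the public HBase 2.x+ Admin API, not the code path the test harness actually uses; the configuration is assumed to come from an hbase-site.xml on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("testtb-testExportWithTargetName");
      // One column family 'cf', single version, defaults otherwise (matching the descriptor in the log).
      TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(name)
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // A single split key '1' yields the two regions seen in the RegionOpenAndInit output:
      // ['', '1') and ['1', '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(builder.build(), splitKeys);
    }
  }
}
```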
2024-12-12T01:17:21,881 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:21,881 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. after waiting 0 ms 2024-12-12T01:17:21,881 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:21,881 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:21,881 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for b3b34f604318f19ca9be86928446a86d: Waiting for close lock at 1733966241881Disabling compacts and flushes for region at 1733966241881Disabling writes for close at 1733966241881Writing region close event to WAL at 1733966241881Closed at 1733966241881 2024-12-12T01:17:21,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741909_1085 (size=67) 2024-12-12T01:17:21,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741909_1085 (size=67) 2024-12-12T01:17:21,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741909_1085 (size=67) 2024-12-12T01:17:21,899 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:17:21,900 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing eff4f35cfe33910ee74fcb87369073e0, disabling compactions & flushes 2024-12-12T01:17:21,900 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 2024-12-12T01:17:21,900 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 2024-12-12T01:17:21,900 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. after waiting 0 ms 2024-12-12T01:17:21,900 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 2024-12-12T01:17:21,900 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 
2024-12-12T01:17:21,900 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for eff4f35cfe33910ee74fcb87369073e0: Waiting for close lock at 1733966241900Disabling compacts and flushes for region at 1733966241900Disabling writes for close at 1733966241900Writing region close event to WAL at 1733966241900Closed at 1733966241900 2024-12-12T01:17:21,902 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:17:21,902 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733966241902"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966241902"}]},"ts":"1733966241902"} 2024-12-12T01:17:21,902 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733966241902"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966241902"}]},"ts":"1733966241902"} 2024-12-12T01:17:21,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-12T01:17:21,907 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-12T01:17:21,908 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:17:21,908 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966241908"}]},"ts":"1733966241908"} 2024-12-12T01:17:21,911 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-12T01:17:21,911 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:17:21,913 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:17:21,913 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:17:21,913 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:17:21,913 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:17:21,913 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:17:21,913 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:17:21,913 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:17:21,914 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:17:21,914 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:17:21,914 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:17:21,914 INFO 
[PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b3b34f604318f19ca9be86928446a86d, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eff4f35cfe33910ee74fcb87369073e0, ASSIGN}] 2024-12-12T01:17:21,916 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b3b34f604318f19ca9be86928446a86d, ASSIGN 2024-12-12T01:17:21,916 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eff4f35cfe33910ee74fcb87369073e0, ASSIGN 2024-12-12T01:17:21,918 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eff4f35cfe33910ee74fcb87369073e0, ASSIGN; state=OFFLINE, location=36236b0d7605,44743,1733966101408; forceNewPlan=false, retain=false 2024-12-12T01:17:21,919 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b3b34f604318f19ca9be86928446a86d, ASSIGN; state=OFFLINE, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:17:22,069 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
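The TransitRegionStateProcedure entries above assign the two new regions to region servers. To confirm from the client side where those regions ended up, a small helper along these lines would do; it assumes an already-open Connection and uses only the public RegionLocator API.

```java
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class RegionLocationCheck {
  /** Prints "encodedRegionName -> serverName" for every region of the test table. */
  static void printAssignments(Connection conn) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation location : locations) {
        System.out.println(location.getRegion().getEncodedName()
            + " -> " + location.getServerName());
      }
    }
  }

  private RegionLocationCheck() {
  }
}
```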
2024-12-12T01:17:22,070 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=b3b34f604318f19ca9be86928446a86d, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:17:22,070 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=eff4f35cfe33910ee74fcb87369073e0, regionState=OPENING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:17:22,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b3b34f604318f19ca9be86928446a86d, ASSIGN because future has completed 2024-12-12T01:17:22,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure b3b34f604318f19ca9be86928446a86d, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:17:22,074 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eff4f35cfe33910ee74fcb87369073e0, ASSIGN because future has completed 2024-12-12T01:17:22,075 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure eff4f35cfe33910ee74fcb87369073e0, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:17:22,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-12T01:17:22,229 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49895, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T01:17:22,231 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:22,231 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => b3b34f604318f19ca9be86928446a86d, NAME => 'testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T01:17:22,232 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. service=AccessControlService 2024-12-12T01:17:22,232 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:17:22,232 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,233 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:17:22,233 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,233 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,235 INFO [StoreOpener-b3b34f604318f19ca9be86928446a86d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,236 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 2024-12-12T01:17:22,236 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => eff4f35cfe33910ee74fcb87369073e0, NAME => 'testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T01:17:22,236 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. service=AccessControlService 2024-12-12T01:17:22,236 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:17:22,237 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,237 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:17:22,237 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,237 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,241 INFO [StoreOpener-b3b34f604318f19ca9be86928446a86d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b3b34f604318f19ca9be86928446a86d columnFamilyName cf 2024-12-12T01:17:22,241 DEBUG [StoreOpener-b3b34f604318f19ca9be86928446a86d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:17:22,242 INFO [StoreOpener-b3b34f604318f19ca9be86928446a86d-1 {}] regionserver.HStore(327): Store=b3b34f604318f19ca9be86928446a86d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:17:22,242 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,243 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,244 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,245 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,245 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] 
regionserver.HRegion(1060): Cleaning up temporary data for b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,246 INFO [StoreOpener-eff4f35cfe33910ee74fcb87369073e0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,247 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,250 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:17:22,251 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened b3b34f604318f19ca9be86928446a86d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60666282, jitterRate=-0.09600195288658142}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:17:22,251 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,252 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for b3b34f604318f19ca9be86928446a86d: Running coprocessor pre-open hook at 1733966242233Writing region info on filesystem at 1733966242233Initializing all the Stores at 1733966242234 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966242234Cleaning up temporary data from old regions at 1733966242245 (+11 ms)Running coprocessor post-open hooks at 1733966242251 (+6 ms)Region opened successfully at 1733966242252 (+1 ms) 2024-12-12T01:17:22,253 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d., pid=48, masterSystemTime=1733966242226 2024-12-12T01:17:22,257 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:22,257 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 
2024-12-12T01:17:22,257 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=b3b34f604318f19ca9be86928446a86d, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:17:22,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure b3b34f604318f19ca9be86928446a86d, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:17:22,263 INFO [StoreOpener-eff4f35cfe33910ee74fcb87369073e0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eff4f35cfe33910ee74fcb87369073e0 columnFamilyName cf 2024-12-12T01:17:22,263 DEBUG [StoreOpener-eff4f35cfe33910ee74fcb87369073e0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:17:22,265 INFO [StoreOpener-eff4f35cfe33910ee74fcb87369073e0-1 {}] regionserver.HStore(327): Store=eff4f35cfe33910ee74fcb87369073e0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:17:22,265 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,266 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,267 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,268 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,268 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,269 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=46 2024-12-12T01:17:22,269 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure b3b34f604318f19ca9be86928446a86d, server=36236b0d7605,38295,1733966101221 in 190 msec 
2024-12-12T01:17:22,270 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,272 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b3b34f604318f19ca9be86928446a86d, ASSIGN in 355 msec 2024-12-12T01:17:22,278 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:17:22,279 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened eff4f35cfe33910ee74fcb87369073e0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74179698, jitterRate=0.1053636372089386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:17:22,279 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,279 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for eff4f35cfe33910ee74fcb87369073e0: Running coprocessor pre-open hook at 1733966242237Writing region info on filesystem at 1733966242237Initializing all the Stores at 1733966242241 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966242241Cleaning up temporary data from old regions at 1733966242268 (+27 ms)Running coprocessor post-open hooks at 1733966242279 (+11 ms)Region opened successfully at 1733966242279 2024-12-12T01:17:22,284 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0., pid=49, masterSystemTime=1733966242228 2024-12-12T01:17:22,287 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 2024-12-12T01:17:22,287 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 
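Both regions report "Opened ... next sequenceid=2" at this point. A client-side wait for the table to become fully available, similar in spirit to the HBaseTestingUtil wait that follows in the log, can be written with the public Admin API. A sketch under that assumption; the 60-second budget simply mirrors the timeout the test utility logs and is otherwise arbitrary.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class WaitForTable {
  /** Polls until all regions of the table are open and reachable, or the deadline passes. */
  static void waitUntilAvailable(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    long deadline = System.currentTimeMillis() + 60_000L; // 60s, matching the test's wait budget
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("Table " + table + " not available within 60s");
      }
      Thread.sleep(250);
    }
  }

  private WaitForTable() {
  }
}
```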
2024-12-12T01:17:22,287 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=eff4f35cfe33910ee74fcb87369073e0, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:17:22,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure eff4f35cfe33910ee74fcb87369073e0, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:17:22,296 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=47 2024-12-12T01:17:22,296 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure eff4f35cfe33910ee74fcb87369073e0, server=36236b0d7605,44743,1733966101408 in 217 msec 2024-12-12T01:17:22,300 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=47, resume processing ppid=45 2024-12-12T01:17:22,300 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eff4f35cfe33910ee74fcb87369073e0, ASSIGN in 382 msec 2024-12-12T01:17:22,301 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:17:22,301 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966242301"}]},"ts":"1733966242301"} 2024-12-12T01:17:22,304 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-12T01:17:22,305 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:17:22,305 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-12T01:17:22,309 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-12T01:17:22,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:22,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:22,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:22,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:22,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:22,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:22,315 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:22,315 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:22,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 532 msec 2024-12-12T01:17:22,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-12T01:17:22,426 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-12T01:17:22,426 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-12T01:17:22,426 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:17:22,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-12-12T01:17:22,433 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:17:22,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 2024-12-12T01:17:22,433 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-12T01:17:22,438 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-12T01:17:22,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966242438 (current time:1733966242438). 
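The snapshot request logged above ({ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }) corresponds to a plain Admin snapshot call. A minimal sketch, assuming an open Connection; for an enabled table the simple String/TableName overload results in the FLUSH-type snapshot the master records here.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class TakeSnapshot {
  /** Requests a snapshot of the (still empty) test table, as the master log above records. */
  static void snapshotEmptyTable(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      // For an enabled table this produces a FLUSH-type snapshot on the master side.
      admin.snapshot("emptySnaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"));
    }
  }

  private TakeSnapshot() {
  }
}
```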
2024-12-12T01:17:22,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:17:22,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-12T01:17:22,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:17:22,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4edac0ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:22,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:17:22,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:17:22,441 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:17:22,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:17:22,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:17:22,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6feb12a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:22,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:17:22,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:17:22,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:22,443 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51116, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:17:22,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79bdd4e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:22,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:17:22,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:17:22,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:17:22,447 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54922, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:17:22,449 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:17:22,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:17:22,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:22,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:22,450 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
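The call stack above shows the master reading the table's ACL entries (the "jenkins: RWXCA" grant written earlier) so they can be embedded in the snapshot description. From a client, the equivalent read goes through AccessControlClient; a sketch under the assumption that the security coprocessors are loaded, as they are in this secure-export test. Note that AccessControlClient's static helpers declare "throws Throwable".

```java
import java.util.List;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public final class ReadTableAcls {
  /** Lists the ACL entries stored for the test table (e.g. "jenkins: RWXCA"). */
  static void printTablePermissions(Connection conn) throws Throwable {
    List<UserPermission> permissions =
        AccessControlClient.getUserPermissions(conn, "testtb-testExportWithTargetName");
    for (UserPermission permission : permissions) {
      System.out.println(permission); // UserPermission#toString shows the user and its actions
    }
  }

  private ReadTableAcls() {
  }
}
```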
2024-12-12T01:17:22,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40ae7b1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:22,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:17:22,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:17:22,452 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:17:22,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:17:22,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:17:22,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b82381e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:22,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:17:22,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:17:22,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:22,454 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51134, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:17:22,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cd16b95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:22,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:17:22,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:17:22,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:17:22,458 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54934, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-12T01:17:22,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:22,463 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:17:22,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:17:22,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:22,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:22,463 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:17:22,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-12T01:17:22,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:17:22,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-12T01:17:22,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-12T01:17:22,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-12T01:17:22,470 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:17:22,471 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:17:22,474 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:17:22,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741910_1086 (size=167) 2024-12-12T01:17:22,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741910_1086 (size=167) 2024-12-12T01:17:22,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741910_1086 (size=167) 2024-12-12T01:17:22,498 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:17:22,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b3b34f604318f19ca9be86928446a86d}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eff4f35cfe33910ee74fcb87369073e0}] 2024-12-12T01:17:22,500 
INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,500 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-12T01:17:22,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-12T01:17:22,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-12T01:17:22,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:22,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 2024-12-12T01:17:22,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for eff4f35cfe33910ee74fcb87369073e0: 2024-12-12T01:17:22,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for b3b34f604318f19ca9be86928446a86d: 2024-12-12T01:17:22,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. for emptySnaptb0-testExportWithTargetName completed. 2024-12-12T01:17:22,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. for emptySnaptb0-testExportWithTargetName completed. 2024-12-12T01:17:22,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-12T01:17:22,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:17:22,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-12T01:17:22,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:17:22,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:17:22,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:17:22,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741911_1087 (size=70) 2024-12-12T01:17:22,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741911_1087 (size=70) 2024-12-12T01:17:22,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741911_1087 (size=70) 2024-12-12T01:17:22,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741912_1088 (size=70) 2024-12-12T01:17:22,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741912_1088 (size=70) 2024-12-12T01:17:22,694 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 2024-12-12T01:17:22,694 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-12T01:17:22,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741912_1088 (size=70) 2024-12-12T01:17:22,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-12T01:17:22,695 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,695 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 
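The records above trace master procedure pid=50 for emptySnaptb0-testExportWithTargetName through SNAPSHOT_PREPARE, SNAPSHOT_WRITE_SNAPSHOT_INFO and SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, with each region server storing region-info but adding references for an empty hfile list, since no data had been written to the table yet. On the client side, a FLUSH-type snapshot like this is requested with a single Admin.snapshot call; the sketch below is a minimal, hypothetical illustration using the public HBase client API (it is not the TestExportSnapshot test code, and it assumes the cluster configuration is available on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // For an enabled table this defaults to a FLUSH snapshot, matching the
      // type=FLUSH shown in the SnapshotProcedure records above.
      admin.snapshot("emptySnaptb0-testExportWithTargetName", table);
    }
  }
}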
2024-12-12T01:17:22,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-12T01:17:22,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-12T01:17:22,696 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,696 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure eff4f35cfe33910ee74fcb87369073e0 in 198 msec 2024-12-12T01:17:22,700 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=51, resume processing ppid=50 2024-12-12T01:17:22,700 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b3b34f604318f19ca9be86928446a86d in 199 msec 2024-12-12T01:17:22,700 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:17:22,702 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:17:22,703 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:17:22,703 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-12T01:17:22,704 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-12T01:17:22,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741913_1089 (size=549) 2024-12-12T01:17:22,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741913_1089 (size=549) 2024-12-12T01:17:22,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741913_1089 (size=549) 2024-12-12T01:17:22,738 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:17:22,747 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:17:22,748 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-12T01:17:22,750 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:17:22,750 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-12T01:17:22,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 285 msec 2024-12-12T01:17:22,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-12T01:17:22,785 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-12T01:17:22,791 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='02be9f9cf929ca9ab2ff7d21f6c7099f2', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:17:22,792 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='12b594ec7cb715a76a91a25085605ce5e', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:22,797 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='26e4d80abf471282a5b6940cb20a1f0db', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:22,802 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40078, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:17:22,803 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:17:22,806 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44743 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:17:22,810 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-12T01:17:22,814 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-12T01:17:22,814 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:22,815 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:17:22,817 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-12T01:17:22,826 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-12T01:17:22,835 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-12T01:17:22,839 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-12T01:17:22,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966242839 (current time:1733966242839). 
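The two "writing data to region ... with WAL disabled" records above are the warning HRegion emits when a client writes with durability set to skip the write-ahead log; the test writes its rows this way before requesting the second snapshot. A hedged sketch of such a write follows, with a hypothetical row key and value (the column family cf and qualifier q match the keys seen in the later flush records); it is illustrative only, not the test's own code:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table =
             connection.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
      Put put = new Put(Bytes.toBytes("row-0"));                 // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);                    // triggers the "WAL disabled" warning above
      table.put(put);
    }
  }
}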
2024-12-12T01:17:22,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:17:22,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-12T01:17:22,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:17:22,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a6690ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:22,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:17:22,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:17:22,841 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:17:22,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:17:22,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:17:22,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@473f6379, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:22,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:17:22,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:17:22,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:22,843 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40008, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:17:22,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@686a5fb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:22,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:17:22,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:17:22,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:17:22,846 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57482, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:17:22,848 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:17:22,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:17:22,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:22,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:22,848 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:17:22,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79fb8e6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:22,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:17:22,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:17:22,852 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:17:22,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:17:22,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:17:22,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@566c1180, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:22,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:17:22,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:17:22,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:22,854 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40028, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:17:22,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28c74b82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:22,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:17:22,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:17:22,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:17:22,858 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57498, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-12T01:17:22,860 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:22,862 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:17:22,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:17:22,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:22,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:22,862 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:17:22,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-12T01:17:22,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:17:22,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-12T01:17:22,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-12T01:17:22,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-12T01:17:22,866 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:17:22,867 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:17:22,884 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:17:22,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741914_1090 (size=162) 2024-12-12T01:17:22,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741914_1090 (size=162) 2024-12-12T01:17:22,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741914_1090 (size=162) 2024-12-12T01:17:22,914 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:17:22,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b3b34f604318f19ca9be86928446a86d}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eff4f35cfe33910ee74fcb87369073e0}] 2024-12-12T01:17:22,915 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:22,915 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:22,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-12T01:17:23,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-12T01:17:23,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-12T01:17:23,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:23,068 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing b3b34f604318f19ca9be86928446a86d 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-12T01:17:23,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 
2024-12-12T01:17:23,068 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing eff4f35cfe33910ee74fcb87369073e0 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-12T01:17:23,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/.tmp/cf/5f3412be156549d0a5f4bbe807d710a7 is 71, key is 07f245a50a142036e04aece51ed0ce12/cf:q/1733966242803/Put/seqid=0 2024-12-12T01:17:23,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/.tmp/cf/67133b7b37844da0833be3d860f36347 is 71, key is 201ab410c036586388e83e6cdb02a63d/cf:q/1733966242806/Put/seqid=0 2024-12-12T01:17:23,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741915_1091 (size=5216) 2024-12-12T01:17:23,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741915_1091 (size=5216) 2024-12-12T01:17:23,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741915_1091 (size=5216) 2024-12-12T01:17:23,115 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/.tmp/cf/5f3412be156549d0a5f4bbe807d710a7 2024-12-12T01:17:23,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/.tmp/cf/5f3412be156549d0a5f4bbe807d710a7 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/cf/5f3412be156549d0a5f4bbe807d710a7 2024-12-12T01:17:23,131 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/cf/5f3412be156549d0a5f4bbe807d710a7, entries=2, sequenceid=6, filesize=5.1 K 2024-12-12T01:17:23,132 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for b3b34f604318f19ca9be86928446a86d in 64ms, sequenceid=6, compaction requested=false 2024-12-12T01:17:23,132 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-12T01:17:23,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for b3b34f604318f19ca9be86928446a86d: 2024-12-12T01:17:23,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. for snaptb0-testExportWithTargetName completed. 2024-12-12T01:17:23,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-12T01:17:23,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:17:23,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/cf/5f3412be156549d0a5f4bbe807d710a7] hfiles 2024-12-12T01:17:23,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/cf/5f3412be156549d0a5f4bbe807d710a7 for snapshot=snaptb0-testExportWithTargetName 2024-12-12T01:17:23,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741916_1092 (size=8394) 2024-12-12T01:17:23,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741916_1092 (size=8394) 2024-12-12T01:17:23,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741916_1092 (size=8394) 2024-12-12T01:17:23,141 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/.tmp/cf/67133b7b37844da0833be3d860f36347 2024-12-12T01:17:23,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741917_1093 (size=109) 2024-12-12T01:17:23,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741917_1093 (size=109) 2024-12-12T01:17:23,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741917_1093 (size=109) 2024-12-12T01:17:23,166 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:23,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-12T01:17:23,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-12T01:17:23,167 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:23,167 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:23,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b3b34f604318f19ca9be86928446a86d in 254 msec 2024-12-12T01:17:23,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/.tmp/cf/67133b7b37844da0833be3d860f36347 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/cf/67133b7b37844da0833be3d860f36347 2024-12-12T01:17:23,179 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/cf/67133b7b37844da0833be3d860f36347, entries=48, sequenceid=6, filesize=8.2 K 2024-12-12T01:17:23,180 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for eff4f35cfe33910ee74fcb87369073e0 in 112ms, sequenceid=6, compaction requested=false 2024-12-12T01:17:23,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for eff4f35cfe33910ee74fcb87369073e0: 2024-12-12T01:17:23,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. for snaptb0-testExportWithTargetName completed. 2024-12-12T01:17:23,181 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-12T01:17:23,181 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:17:23,181 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/cf/67133b7b37844da0833be3d860f36347] hfiles 2024-12-12T01:17:23,181 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/cf/67133b7b37844da0833be3d860f36347 for snapshot=snaptb0-testExportWithTargetName 2024-12-12T01:17:23,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-12T01:17:23,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741918_1094 (size=109) 2024-12-12T01:17:23,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741918_1094 (size=109) 2024-12-12T01:17:23,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741918_1094 (size=109) 2024-12-12T01:17:23,195 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 
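At this point both region subprocedures of pid=53 have flushed their memstores and recorded references to the newly flushed hfiles in the snapshot manifest. Once the parent procedure finishes, a client can confirm that both snapshots exist; a minimal, hypothetical check using the public Admin API (again, not the test's own verification code):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public final class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Expect emptySnaptb0-testExportWithTargetName and snaptb0-testExportWithTargetName
      // among the results once the SnapshotProcedure above has completed.
      for (SnapshotDescription snapshot : admin.listSnapshots()) {
        System.out.println(snapshot.getName() + " on " + snapshot.getTableName());
      }
    }
  }
}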
2024-12-12T01:17:23,195 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-12T01:17:23,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-12T01:17:23,195 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:23,196 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:23,199 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-12-12T01:17:23,199 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure eff4f35cfe33910ee74fcb87369073e0 in 283 msec 2024-12-12T01:17:23,199 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:17:23,199 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:17:23,200 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:17:23,200 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-12T01:17:23,201 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-12T01:17:23,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741919_1095 (size=627) 2024-12-12T01:17:23,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741919_1095 (size=627) 2024-12-12T01:17:23,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741919_1095 (size=627) 2024-12-12T01:17:23,251 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:17:23,259 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:17:23,260 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-12T01:17:23,263 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:17:23,263 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-12T01:17:23,265 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 400 msec 2024-12-12T01:17:23,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-12T01:17:23,495 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-12T01:17:23,495 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966243495 2024-12-12T01:17:23,496 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33353, tgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966243495, rawTgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966243495, srcFsUri=hdfs://localhost:33353, srcDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:17:23,545 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33353, inputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:17:23,545 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966243495, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966243495/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-12T01:17:23,548 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status 
and integrity. 2024-12-12T01:17:23,564 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966243495/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-12T01:17:23,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741920_1096 (size=162) 2024-12-12T01:17:23,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741920_1096 (size=162) 2024-12-12T01:17:23,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741920_1096 (size=162) 2024-12-12T01:17:23,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741921_1097 (size=627) 2024-12-12T01:17:23,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741921_1097 (size=627) 2024-12-12T01:17:23,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741921_1097 (size=627) 2024-12-12T01:17:24,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741922_1098 (size=154) 2024-12-12T01:17:24,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741922_1098 (size=154) 2024-12-12T01:17:24,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741922_1098 (size=154) 2024-12-12T01:17:24,039 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:24,040 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:24,041 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:24,877 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0001_000001 (auth:SIMPLE) from 127.0.0.1:47560 2024-12-12T01:17:24,884 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0001/container_1733966109028_0001_01_000001/launch_container.sh] 2024-12-12T01:17:24,884 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): 
delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0001/container_1733966109028_0001_01_000001/container_tokens] 2024-12-12T01:17:24,884 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0001/container_1733966109028_0001_01_000001/sysfs] 2024-12-12T01:17:25,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-15156640778191496155.jar 2024-12-12T01:17:25,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:25,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:25,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-14365289999597330191.jar 2024-12-12T01:17:25,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:25,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:25,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:25,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:25,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:25,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:25,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T01:17:25,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T01:17:25,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T01:17:25,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T01:17:25,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T01:17:25,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T01:17:25,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T01:17:25,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T01:17:25,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T01:17:25,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T01:17:25,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T01:17:25,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:17:25,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:17:25,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:17:25,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:17:25,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:17:25,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:17:25,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:17:25,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741923_1099 (size=131440) 2024-12-12T01:17:25,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741923_1099 (size=131440) 2024-12-12T01:17:25,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741923_1099 (size=131440) 2024-12-12T01:17:25,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741924_1100 (size=4188619) 2024-12-12T01:17:25,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741924_1100 (size=4188619) 2024-12-12T01:17:25,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741924_1100 (size=4188619) 2024-12-12T01:17:25,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741925_1101 (size=1323991) 
2024-12-12T01:17:25,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741925_1101 (size=1323991) 2024-12-12T01:17:25,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741925_1101 (size=1323991) 2024-12-12T01:17:25,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741926_1102 (size=903541) 2024-12-12T01:17:25,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741926_1102 (size=903541) 2024-12-12T01:17:25,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741926_1102 (size=903541) 2024-12-12T01:17:25,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741927_1103 (size=8360360) 2024-12-12T01:17:25,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741927_1103 (size=8360360) 2024-12-12T01:17:25,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741927_1103 (size=8360360) 2024-12-12T01:17:25,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741928_1104 (size=443172) 2024-12-12T01:17:25,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741928_1104 (size=443172) 2024-12-12T01:17:25,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741928_1104 (size=443172) 2024-12-12T01:17:25,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741929_1105 (size=1877034) 2024-12-12T01:17:25,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741929_1105 (size=1877034) 2024-12-12T01:17:25,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741929_1105 (size=1877034) 2024-12-12T01:17:25,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741930_1106 (size=77835) 2024-12-12T01:17:25,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741930_1106 (size=77835) 2024-12-12T01:17:25,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741930_1106 (size=77835) 2024-12-12T01:17:25,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741931_1107 (size=30949) 2024-12-12T01:17:25,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741931_1107 (size=30949) 2024-12-12T01:17:25,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741931_1107 
(size=30949) 2024-12-12T01:17:25,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741932_1108 (size=1597213) 2024-12-12T01:17:25,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741932_1108 (size=1597213) 2024-12-12T01:17:25,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741932_1108 (size=1597213) 2024-12-12T01:17:25,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741933_1109 (size=4695811) 2024-12-12T01:17:25,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741933_1109 (size=4695811) 2024-12-12T01:17:25,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741933_1109 (size=4695811) 2024-12-12T01:17:25,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741934_1110 (size=232957) 2024-12-12T01:17:25,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741934_1110 (size=232957) 2024-12-12T01:17:25,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741934_1110 (size=232957) 2024-12-12T01:17:25,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741935_1111 (size=127628) 2024-12-12T01:17:25,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741935_1111 (size=127628) 2024-12-12T01:17:25,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741935_1111 (size=127628) 2024-12-12T01:17:25,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741936_1112 (size=20406) 2024-12-12T01:17:25,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741936_1112 (size=20406) 2024-12-12T01:17:25,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741936_1112 (size=20406) 2024-12-12T01:17:25,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741937_1113 (size=5175431) 2024-12-12T01:17:25,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741937_1113 (size=5175431) 2024-12-12T01:17:25,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741937_1113 (size=5175431) 2024-12-12T01:17:25,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741938_1114 (size=217634) 2024-12-12T01:17:25,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to 
blk_1073741938_1114 (size=217634) 2024-12-12T01:17:25,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741938_1114 (size=217634) 2024-12-12T01:17:25,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741939_1115 (size=1832290) 2024-12-12T01:17:25,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741939_1115 (size=1832290) 2024-12-12T01:17:25,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741939_1115 (size=1832290) 2024-12-12T01:17:26,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741940_1116 (size=322274) 2024-12-12T01:17:26,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741940_1116 (size=322274) 2024-12-12T01:17:26,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741940_1116 (size=322274) 2024-12-12T01:17:26,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741941_1117 (size=503880) 2024-12-12T01:17:26,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741941_1117 (size=503880) 2024-12-12T01:17:26,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741941_1117 (size=503880) 2024-12-12T01:17:26,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741942_1118 (size=29229) 2024-12-12T01:17:26,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741942_1118 (size=29229) 2024-12-12T01:17:26,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741942_1118 (size=29229) 2024-12-12T01:17:26,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741943_1119 (size=24096) 2024-12-12T01:17:26,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741943_1119 (size=24096) 2024-12-12T01:17:26,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741943_1119 (size=24096) 2024-12-12T01:17:26,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741944_1120 (size=111872) 2024-12-12T01:17:26,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741944_1120 (size=111872) 2024-12-12T01:17:26,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741944_1120 (size=111872) 2024-12-12T01:17:26,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added 
to blk_1073741945_1121 (size=6426224) 2024-12-12T01:17:26,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741945_1121 (size=6426224) 2024-12-12T01:17:26,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741945_1121 (size=6426224) 2024-12-12T01:17:26,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741946_1122 (size=45609) 2024-12-12T01:17:26,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741946_1122 (size=45609) 2024-12-12T01:17:26,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741946_1122 (size=45609) 2024-12-12T01:17:26,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741947_1123 (size=136454) 2024-12-12T01:17:26,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741947_1123 (size=136454) 2024-12-12T01:17:26,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741947_1123 (size=136454) 2024-12-12T01:17:26,186 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-12T01:17:26,190 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-12T01:17:26,193 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.2 K 2024-12-12T01:17:26,193 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.1 K 2024-12-12T01:17:26,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741948_1124 (size=445) 2024-12-12T01:17:26,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741948_1124 (size=445) 2024-12-12T01:17:26,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741948_1124 (size=445) 2024-12-12T01:17:26,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741949_1125 (size=21) 2024-12-12T01:17:26,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741949_1125 (size=21) 2024-12-12T01:17:26,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741949_1125 (size=21) 2024-12-12T01:17:26,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741950_1126 (size=304007) 2024-12-12T01:17:26,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741950_1126 (size=304007) 2024-12-12T01:17:26,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to 
blk_1073741950_1126 (size=304007) 2024-12-12T01:17:26,261 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:17:26,261 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:17:26,481 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:17:26,739 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0002_000001 (auth:SIMPLE) from 127.0.0.1:45532 2024-12-12T01:17:28,909 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:17:30,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-12T01:17:30,712 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-12T01:17:30,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:30,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-12T01:17:35,890 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0002_000001 (auth:SIMPLE) from 127.0.0.1:52658 2024-12-12T01:17:36,218 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:17:36,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741951_1127 (size=349705) 2024-12-12T01:17:36,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741951_1127 (size=349705) 2024-12-12T01:17:36,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741951_1127 (size=349705) 2024-12-12T01:17:38,234 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0002_000001 (auth:SIMPLE) from 127.0.0.1:47320 2024-12-12T01:17:38,234 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0002_000001 (auth:SIMPLE) from 127.0.0.1:56262 2024-12-12T01:17:42,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is 
added to blk_1073741952_1128 (size=8394) 2024-12-12T01:17:42,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741952_1128 (size=8394) 2024-12-12T01:17:42,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741952_1128 (size=8394) 2024-12-12T01:17:42,640 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0002/container_1733966109028_0002_01_000002/launch_container.sh] 2024-12-12T01:17:42,640 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0002/container_1733966109028_0002_01_000002/container_tokens] 2024-12-12T01:17:42,640 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0002/container_1733966109028_0002_01_000002/sysfs] 2024-12-12T01:17:43,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741954_1130 (size=5216) 2024-12-12T01:17:43,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741954_1130 (size=5216) 2024-12-12T01:17:43,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741954_1130 (size=5216) 2024-12-12T01:17:43,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741953_1129 (size=22163) 2024-12-12T01:17:43,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741953_1129 (size=22163) 2024-12-12T01:17:43,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741953_1129 (size=22163) 2024-12-12T01:17:43,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741955_1131 (size=464) 2024-12-12T01:17:43,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741955_1131 (size=464) 2024-12-12T01:17:43,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741955_1131 (size=464) 2024-12-12T01:17:43,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741956_1132 (size=22163) 2024-12-12T01:17:43,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added 
to blk_1073741956_1132 (size=22163) 2024-12-12T01:17:43,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741956_1132 (size=22163) 2024-12-12T01:17:43,251 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0002/container_1733966109028_0002_01_000003/launch_container.sh] 2024-12-12T01:17:43,251 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0002/container_1733966109028_0002_01_000003/container_tokens] 2024-12-12T01:17:43,251 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0002/container_1733966109028_0002_01_000003/sysfs] 2024-12-12T01:17:43,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741957_1133 (size=349705) 2024-12-12T01:17:43,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741957_1133 (size=349705) 2024-12-12T01:17:43,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741957_1133 (size=349705) 2024-12-12T01:17:43,289 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0002_000001 (auth:SIMPLE) from 127.0.0.1:36560 2024-12-12T01:17:44,521 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-12T01:17:44,523 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-12T01:17:44,530 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-12T01:17:44,530 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-12T01:17:44,531 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-12T01:17:44,531 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-12T01:17:44,531 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-12T01:17:44,531 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-12T01:17:44,532 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966243495/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966243495/.hbase-snapshot/testExportWithTargetName 2024-12-12T01:17:44,532 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966243495/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-12T01:17:44,532 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966243495/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-12T01:17:44,539 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-12T01:17:44,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-12T01:17:44,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-12T01:17:44,543 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966264543"}]},"ts":"1733966264543"} 2024-12-12T01:17:44,545 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-12T01:17:44,545 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-12T01:17:44,545 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-12T01:17:44,547 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b3b34f604318f19ca9be86928446a86d, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eff4f35cfe33910ee74fcb87369073e0, UNASSIGN}] 2024-12-12T01:17:44,548 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eff4f35cfe33910ee74fcb87369073e0, UNASSIGN 2024-12-12T01:17:44,548 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b3b34f604318f19ca9be86928446a86d, UNASSIGN 2024-12-12T01:17:44,549 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=b3b34f604318f19ca9be86928446a86d, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:17:44,549 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=eff4f35cfe33910ee74fcb87369073e0, regionState=CLOSING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:17:44,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b3b34f604318f19ca9be86928446a86d, UNASSIGN because future has completed 2024-12-12T01:17:44,551 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:17:44,551 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure b3b34f604318f19ca9be86928446a86d, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:17:44,552 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eff4f35cfe33910ee74fcb87369073e0, UNASSIGN because future has completed 2024-12-12T01:17:44,552 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:17:44,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure eff4f35cfe33910ee74fcb87369073e0, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:17:44,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-12T01:17:44,704 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] 
handler.UnassignRegionHandler(122): Close b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:44,704 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:17:44,704 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing b3b34f604318f19ca9be86928446a86d, disabling compactions & flushes 2024-12-12T01:17:44,704 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:44,704 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:44,704 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. after waiting 0 ms 2024-12-12T01:17:44,704 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:44,705 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:44,705 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:17:44,706 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing eff4f35cfe33910ee74fcb87369073e0, disabling compactions & flushes 2024-12-12T01:17:44,706 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 2024-12-12T01:17:44,706 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 2024-12-12T01:17:44,706 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. after waiting 0 ms 2024-12-12T01:17:44,706 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 
2024-12-12T01:17:44,711 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:17:44,712 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:17:44,712 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d. 2024-12-12T01:17:44,712 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for b3b34f604318f19ca9be86928446a86d: Waiting for close lock at 1733966264704Running coprocessor pre-close hooks at 1733966264704Disabling compacts and flushes for region at 1733966264704Disabling writes for close at 1733966264704Writing region close event to WAL at 1733966264706 (+2 ms)Running coprocessor post-close hooks at 1733966264711 (+5 ms)Closed at 1733966264712 (+1 ms) 2024-12-12T01:17:44,714 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:17:44,714 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:44,714 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:17:44,714 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=b3b34f604318f19ca9be86928446a86d, regionState=CLOSED 2024-12-12T01:17:44,714 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0. 
2024-12-12T01:17:44,715 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for eff4f35cfe33910ee74fcb87369073e0: Waiting for close lock at 1733966264705Running coprocessor pre-close hooks at 1733966264705Disabling compacts and flushes for region at 1733966264705Disabling writes for close at 1733966264706 (+1 ms)Writing region close event to WAL at 1733966264707 (+1 ms)Running coprocessor post-close hooks at 1733966264714 (+7 ms)Closed at 1733966264714 2024-12-12T01:17:44,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure b3b34f604318f19ca9be86928446a86d, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:17:44,717 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:44,717 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=eff4f35cfe33910ee74fcb87369073e0, regionState=CLOSED 2024-12-12T01:17:44,720 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure eff4f35cfe33910ee74fcb87369073e0, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:17:44,720 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-12-12T01:17:44,722 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure b3b34f604318f19ca9be86928446a86d, server=36236b0d7605,38295,1733966101221 in 167 msec 2024-12-12T01:17:44,722 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=b3b34f604318f19ca9be86928446a86d, UNASSIGN in 173 msec 2024-12-12T01:17:44,723 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-12-12T01:17:44,723 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure eff4f35cfe33910ee74fcb87369073e0, server=36236b0d7605,44743,1733966101408 in 169 msec 2024-12-12T01:17:44,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=57 2024-12-12T01:17:44,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eff4f35cfe33910ee74fcb87369073e0, UNASSIGN in 176 msec 2024-12-12T01:17:44,727 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-12T01:17:44,728 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 181 msec 2024-12-12T01:17:44,729 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966264729"}]},"ts":"1733966264729"} 2024-12-12T01:17:44,731 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-12T01:17:44,731 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-12T01:17:44,733 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 193 msec 2024-12-12T01:17:44,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-12T01:17:44,855 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-12T01:17:44,856 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-12T01:17:44,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-12T01:17:44,858 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-12T01:17:44,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-12T01:17:44,859 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-12T01:17:44,862 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-12T01:17:44,863 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:44,863 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:44,865 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/recovered.edits] 2024-12-12T01:17:44,866 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/recovered.edits] 2024-12-12T01:17:44,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T01:17:44,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T01:17:44,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T01:17:44,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T01:17:44,867 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-12T01:17:44,867 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-12T01:17:44,867 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-12T01:17:44,867 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-12T01:17:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T01:17:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T01:17:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T01:17:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/acl 2024-12-12T01:17:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-12T01:17:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:44,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-12T01:17:44,872 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/cf/67133b7b37844da0833be3d860f36347 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/cf/67133b7b37844da0833be3d860f36347 2024-12-12T01:17:44,873 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/cf/5f3412be156549d0a5f4bbe807d710a7 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/cf/5f3412be156549d0a5f4bbe807d710a7 2024-12-12T01:17:44,877 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d/recovered.edits/9.seqid 2024-12-12T01:17:44,877 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0/recovered.edits/9.seqid 2024-12-12T01:17:44,877 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/b3b34f604318f19ca9be86928446a86d 2024-12-12T01:17:44,878 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithTargetName/eff4f35cfe33910ee74fcb87369073e0 2024-12-12T01:17:44,878 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-12T01:17:44,881 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-12T01:17:44,885 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-12T01:17:44,889 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-12T01:17:44,890 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-12T01:17:44,890 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-12T01:17:44,891 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966264890"}]},"ts":"9223372036854775807"} 2024-12-12T01:17:44,891 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966264890"}]},"ts":"9223372036854775807"} 2024-12-12T01:17:44,894 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-12T01:17:44,894 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b3b34f604318f19ca9be86928446a86d, NAME => 'testtb-testExportWithTargetName,,1733966241782.b3b34f604318f19ca9be86928446a86d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => eff4f35cfe33910ee74fcb87369073e0, NAME => 'testtb-testExportWithTargetName,1,1733966241782.eff4f35cfe33910ee74fcb87369073e0.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T01:17:44,894 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithTargetName' as deleted. 
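The HFile archiving and META cleanup above belong to DeleteTableProcedure pid=62. A sketch of the corresponding client calls, reusing the admin handle from the earlier sketch (the snapshot deletions appear further down in the log):

    // Hypothetical sketch: delete the (already disabled) table, then its snapshots.
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    admin.deleteTable(table);               // DeleteTableProcedure archives regions and clears META
    // admin.tableExists(table) would now return false.
    admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
    admin.deleteSnapshot("snaptb0-testExportWithTargetName");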
2024-12-12T01:17:44,894 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966264894"}]},"ts":"9223372036854775807"} 2024-12-12T01:17:44,897 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-12T01:17:44,897 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-12T01:17:44,899 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 42 msec 2024-12-12T01:17:44,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-12T01:17:44,976 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-12T01:17:44,976 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-12T01:17:44,985 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-12T01:17:44,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-12T01:17:44,991 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-12-12T01:17:44,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-12T01:17:45,024 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=784 (was 758) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:49176 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:42175 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-2134 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) 
java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: process reaper (pid 19685) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:43443 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:47902 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33919 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42175 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1494274710_1 at /127.0.0.1:47888 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34899 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43443 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:33919 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #1 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:42292 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=782 (was 767) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=630 (was 517) - SystemLoadAverage LEAK? -, ProcessCount=14 (was 17), AvailableMemoryMB=6943 (was 5391) - AvailableMemoryMB LEAK? 
- 2024-12-12T01:17:45,024 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=784 is superior to 500 2024-12-12T01:17:45,044 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=784, OpenFileDescriptor=782, MaxFileDescriptor=1048576, SystemLoadAverage=630, ProcessCount=14, AvailableMemoryMB=6943 2024-12-12T01:17:45,044 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=784 is superior to 500 2024-12-12T01:17:45,046 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:17:45,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T01:17:45,048 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:17:45,048 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:17:45,048 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-12-12T01:17:45,049 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:17:45,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-12T01:17:45,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741958_1134 (size=404) 2024-12-12T01:17:45,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741958_1134 (size=404) 2024-12-12T01:17:45,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741958_1134 (size=404) 2024-12-12T01:17:45,060 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5edcb373fdf5909fc7a4f8d149a52280, NAME => 'testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:17:45,061 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 7c31809c67fcfd8c14b5f7d7fce44ac2, NAME => 'testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:17:45,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741959_1135 (size=65) 2024-12-12T01:17:45,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741959_1135 (size=65) 2024-12-12T01:17:45,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741960_1136 (size=65) 2024-12-12T01:17:45,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741959_1135 (size=65) 2024-12-12T01:17:45,071 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:17:45,071 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 7c31809c67fcfd8c14b5f7d7fce44ac2, disabling compactions & flushes 2024-12-12T01:17:45,071 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 2024-12-12T01:17:45,071 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 2024-12-12T01:17:45,071 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. after waiting 0 ms 2024-12-12T01:17:45,071 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 2024-12-12T01:17:45,071 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 
2024-12-12T01:17:45,071 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 7c31809c67fcfd8c14b5f7d7fce44ac2: Waiting for close lock at 1733966265071Disabling compacts and flushes for region at 1733966265071Disabling writes for close at 1733966265071Writing region close event to WAL at 1733966265071Closed at 1733966265071 2024-12-12T01:17:45,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741960_1136 (size=65) 2024-12-12T01:17:45,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741960_1136 (size=65) 2024-12-12T01:17:45,072 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:17:45,072 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 5edcb373fdf5909fc7a4f8d149a52280, disabling compactions & flushes 2024-12-12T01:17:45,072 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 2024-12-12T01:17:45,073 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 2024-12-12T01:17:45,073 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. after waiting 0 ms 2024-12-12T01:17:45,073 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 2024-12-12T01:17:45,073 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 
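The CreateTableProcedure above initializes two regions for testtb-testExportWithResetTtl, split at row key '1', with a single 'cf' family that keeps one version. A client-side sketch of an equivalent create request; the builder values are read off the descriptor printed in the log, the rest is assumed:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical sketch: pre-split create matching the two regions initialized above
    // (STARTKEY '' .. '1' and '1' .. '').
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
    admin.createTable(
        TableDescriptorBuilder.newBuilder(table)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)          // VERSIONS => '1' in the logged descriptor
                .build())
            .build(),
        splitKeys);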
2024-12-12T01:17:45,073 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5edcb373fdf5909fc7a4f8d149a52280: Waiting for close lock at 1733966265072Disabling compacts and flushes for region at 1733966265072Disabling writes for close at 1733966265073 (+1 ms)Writing region close event to WAL at 1733966265073Closed at 1733966265073 2024-12-12T01:17:45,074 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:17:45,074 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733966265074"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966265074"}]},"ts":"1733966265074"} 2024-12-12T01:17:45,075 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733966265074"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966265074"}]},"ts":"1733966265074"} 2024-12-12T01:17:45,078 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-12T01:17:45,079 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:17:45,079 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966265079"}]},"ts":"1733966265079"} 2024-12-12T01:17:45,081 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-12T01:17:45,081 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:17:45,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:17:45,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:17:45,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:17:45,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:17:45,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:17:45,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:17:45,083 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:17:45,083 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:17:45,083 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:17:45,083 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:17:45,084 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=5edcb373fdf5909fc7a4f8d149a52280, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7c31809c67fcfd8c14b5f7d7fce44ac2, ASSIGN}] 2024-12-12T01:17:45,085 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7c31809c67fcfd8c14b5f7d7fce44ac2, ASSIGN 2024-12-12T01:17:45,085 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=5edcb373fdf5909fc7a4f8d149a52280, ASSIGN 2024-12-12T01:17:45,086 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7c31809c67fcfd8c14b5f7d7fce44ac2, ASSIGN; state=OFFLINE, location=36236b0d7605,44743,1733966101408; forceNewPlan=false, retain=false 2024-12-12T01:17:45,087 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=5edcb373fdf5909fc7a4f8d149a52280, ASSIGN; state=OFFLINE, location=36236b0d7605,33883,1733966101479; forceNewPlan=false, retain=false 2024-12-12T01:17:45,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-12T01:17:45,237 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
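Once the two ASSIGN procedures above complete and the regions reach OPEN, the placement can be confirmed from the client side. A sketch, again reusing the connection from the first sketch:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Hypothetical sketch: list where each region of the new table was assigned.
    try (RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        // e.g. 5edcb373fdf5909fc7a4f8d149a52280 -> 36236b0d7605,33883,1733966101479
        System.out.println(location.getRegion().getEncodedName()
            + " -> " + location.getServerName());
      }
    }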
2024-12-12T01:17:45,237 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=5edcb373fdf5909fc7a4f8d149a52280, regionState=OPENING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:17:45,237 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=7c31809c67fcfd8c14b5f7d7fce44ac2, regionState=OPENING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:17:45,240 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=5edcb373fdf5909fc7a4f8d149a52280, ASSIGN because future has completed 2024-12-12T01:17:45,240 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:17:45,241 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7c31809c67fcfd8c14b5f7d7fce44ac2, ASSIGN because future has completed 2024-12-12T01:17:45,241 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:17:45,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-12T01:17:45,397 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 2024-12-12T01:17:45,397 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 2024-12-12T01:17:45,398 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => 5edcb373fdf5909fc7a4f8d149a52280, NAME => 'testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T01:17:45,398 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => 7c31809c67fcfd8c14b5f7d7fce44ac2, NAME => 'testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T01:17:45,398 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. service=AccessControlService 2024-12-12T01:17:45,398 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 
service=AccessControlService 2024-12-12T01:17:45,398 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:17:45,398 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:17:45,398 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,398 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:17:45,399 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,399 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,399 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,399 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:17:45,399 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,399 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,403 INFO [StoreOpener-7c31809c67fcfd8c14b5f7d7fce44ac2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,403 INFO [StoreOpener-5edcb373fdf5909fc7a4f8d149a52280-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,404 INFO [StoreOpener-5edcb373fdf5909fc7a4f8d149a52280-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5edcb373fdf5909fc7a4f8d149a52280 columnFamilyName cf 2024-12-12T01:17:45,404 INFO [StoreOpener-7c31809c67fcfd8c14b5f7d7fce44ac2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c31809c67fcfd8c14b5f7d7fce44ac2 columnFamilyName cf 2024-12-12T01:17:45,405 DEBUG [StoreOpener-5edcb373fdf5909fc7a4f8d149a52280-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:17:45,405 DEBUG [StoreOpener-7c31809c67fcfd8c14b5f7d7fce44ac2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:17:45,405 INFO [StoreOpener-5edcb373fdf5909fc7a4f8d149a52280-1 {}] regionserver.HStore(327): Store=5edcb373fdf5909fc7a4f8d149a52280/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:17:45,405 INFO [StoreOpener-7c31809c67fcfd8c14b5f7d7fce44ac2-1 {}] regionserver.HStore(327): Store=7c31809c67fcfd8c14b5f7d7fce44ac2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:17:45,405 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,405 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,406 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,407 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,407 DEBUG 
[RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,407 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,407 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,407 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,407 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,408 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,409 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,410 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,412 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:17:45,413 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened 7c31809c67fcfd8c14b5f7d7fce44ac2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64981917, jitterRate=-0.03169398009777069}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:17:45,413 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,414 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:17:45,415 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened 5edcb373fdf5909fc7a4f8d149a52280; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71398147, jitterRate=0.06391529738903046}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:17:45,415 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,415 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for 5edcb373fdf5909fc7a4f8d149a52280: Running coprocessor pre-open hook at 1733966265399Writing region info on filesystem at 1733966265399Initializing all the Stores at 1733966265400 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966265400Cleaning up temporary data from old regions at 1733966265408 (+8 ms)Running coprocessor post-open hooks at 1733966265415 (+7 ms)Region opened successfully at 1733966265415 2024-12-12T01:17:45,417 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for 7c31809c67fcfd8c14b5f7d7fce44ac2: Running coprocessor pre-open hook at 1733966265399Writing region info on filesystem at 1733966265399Initializing all the Stores at 1733966265400 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966265400Cleaning up temporary data from old regions at 1733966265407 (+7 ms)Running coprocessor post-open hooks at 1733966265413 (+6 ms)Region opened successfully at 1733966265417 (+4 ms) 2024-12-12T01:17:45,417 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280., pid=66, masterSystemTime=1733966265392 2024-12-12T01:17:45,419 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2., pid=67, masterSystemTime=1733966265394 2024-12-12T01:17:45,419 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 2024-12-12T01:17:45,419 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 
2024-12-12T01:17:45,420 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=5edcb373fdf5909fc7a4f8d149a52280, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:17:45,421 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=7c31809c67fcfd8c14b5f7d7fce44ac2, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:17:45,421 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 2024-12-12T01:17:45,422 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 2024-12-12T01:17:45,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:17:45,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:17:45,427 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=64 2024-12-12T01:17:45,427 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280, server=36236b0d7605,33883,1733966101479 in 184 msec 2024-12-12T01:17:45,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=65 2024-12-12T01:17:45,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2, server=36236b0d7605,44743,1733966101408 in 185 msec 2024-12-12T01:17:45,429 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=5edcb373fdf5909fc7a4f8d149a52280, ASSIGN in 343 msec 2024-12-12T01:17:45,430 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-12-12T01:17:45,430 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7c31809c67fcfd8c14b5f7d7fce44ac2, ASSIGN in 344 msec 2024-12-12T01:17:45,431 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:17:45,431 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966265431"}]},"ts":"1733966265431"} 2024-12-12T01:17:45,433 INFO [PEWorker-4 {}] 
hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-12T01:17:45,434 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:17:45,434 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-12T01:17:45,438 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-12T01:17:45,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:45,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:45,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:45,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:45,443 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:45,443 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:45,443 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:45,444 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:45,445 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 397 msec 2024-12-12T01:17:45,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-12T01:17:45,675 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-12T01:17:45,675 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): 
Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-12T01:17:45,675 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:17:45,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-12T01:17:45,679 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:17:45,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 2024-12-12T01:17:45,679 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-12T01:17:45,683 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-12T01:17:45,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966265683 (current time:1733966265683). 2024-12-12T01:17:45,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:17:45,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-12T01:17:45,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:17:45,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c63b5eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:45,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:17:45,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:17:45,685 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:17:45,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:17:45,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:17:45,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71203399, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, 
maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:45,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:17:45,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:17:45,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:45,687 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43586, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:17:45,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@608abfed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:45,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:17:45,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:17:45,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:17:45,691 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52370, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:17:45,692 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 
2024-12-12T01:17:45,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:17:45,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:45,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:45,692 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:17:45,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63917990, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:45,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:17:45,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:17:45,694 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:17:45,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:17:45,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:17:45,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52a16142, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:45,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:17:45,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:17:45,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:45,695 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43612, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:17:45,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f64496, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:45,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:17:45,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:17:45,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:17:45,698 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52376, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:17:45,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:45,702 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 
2024-12-12T01:17:45,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:17:45,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:45,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:45,703 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:17:45,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-12T01:17:45,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-12T01:17:45,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-12T01:17:45,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-12T01:17:45,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-12T01:17:45,705 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:17:45,707 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:17:45,709 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:17:45,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741961_1137 (size=161) 2024-12-12T01:17:45,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741961_1137 (size=161) 2024-12-12T01:17:45,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741961_1137 (size=161) 2024-12-12T01:17:45,720 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:17:45,720 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2}] 2024-12-12T01:17:45,721 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,721 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,814 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-12T01:17:45,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-12T01:17:45,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-12T01:17:45,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 2024-12-12T01:17:45,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 2024-12-12T01:17:45,874 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for 5edcb373fdf5909fc7a4f8d149a52280: 2024-12-12T01:17:45,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-12T01:17:45,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for 7c31809c67fcfd8c14b5f7d7fce44ac2: 2024-12-12T01:17:45,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-12T01:17:45,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-12T01:17:45,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-12T01:17:45,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:17:45,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:17:45,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:17:45,875 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:17:45,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741962_1138 (size=68) 2024-12-12T01:17:45,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741962_1138 (size=68) 2024-12-12T01:17:45,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741962_1138 (size=68) 2024-12-12T01:17:45,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 2024-12-12T01:17:45,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-12T01:17:45,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-12T01:17:45,887 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,887 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:45,889 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280 in 168 msec 2024-12-12T01:17:45,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741963_1139 (size=68) 2024-12-12T01:17:45,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741963_1139 (size=68) 2024-12-12T01:17:45,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741963_1139 (size=68) 2024-12-12T01:17:45,896 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 
2024-12-12T01:17:45,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-12T01:17:45,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-12T01:17:45,897 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,897 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:45,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=68 2024-12-12T01:17:45,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2 in 178 msec 2024-12-12T01:17:45,900 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:17:45,901 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:17:45,902 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:17:45,902 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-12T01:17:45,903 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-12T01:17:45,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741964_1140 (size=543) 2024-12-12T01:17:45,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741964_1140 (size=543) 2024-12-12T01:17:45,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741964_1140 (size=543) 2024-12-12T01:17:45,922 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:17:45,927 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:17:45,927 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-12T01:17:45,929 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:17:45,929 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-12T01:17:45,930 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 226 msec 2024-12-12T01:17:45,940 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-12T01:17:46,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-12T01:17:46,025 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-12T01:17:46,031 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='0091b6b15067aa1c7c5fff76e53418637', locateType=CURRENT is [region=testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:17:46,032 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='146340c6f86ee8993b3395189d725a7f2', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:46,034 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='2bdd0662fbc404fea7dcd57081c4f80d4', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:46,035 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='35b7ab4aee8005ea67b826f3e6768398a', locateType=CURRENT is 
[region=testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:46,036 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='43a213eac21f03fd04a32a6c23ac238e3', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:46,037 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='5abb03951be8a9210517b2214f485e1da', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:46,039 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57950, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:17:46,040 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33883 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:17:46,042 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44743 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:17:46,044 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-12T01:17:46,047 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-12T01:17:46,047 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 
2024-12-12T01:17:46,047 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:17:46,049 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-12T01:17:46,054 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-12T01:17:46,060 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-12T01:17:46,063 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-12T01:17:46,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966266063 (current time:1733966266063). 2024-12-12T01:17:46,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:17:46,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-12T01:17:46,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:17:46,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fc358b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:46,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:17:46,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:17:46,065 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:17:46,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:17:46,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:17:46,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c35470e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-12T01:17:46,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:17:46,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:17:46,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:46,066 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43644, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:17:46,067 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c7c6bdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:46,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:17:46,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:17:46,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:17:46,069 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52388, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:17:46,070 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 
2024-12-12T01:17:46,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:17:46,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:46,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:46,070 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:17:46,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75e6a360, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:46,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:17:46,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:17:46,072 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:17:46,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:17:46,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:17:46,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@438beb5e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:46,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:17:46,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:17:46,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:46,074 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43664, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:17:46,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@521bc852, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:46,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:17:46,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:17:46,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:17:46,078 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52394, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:17:46,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:46,081 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 
2024-12-12T01:17:46,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:17:46,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:46,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:46,081 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:17:46,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-12T01:17:46,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-12T01:17:46,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-12T01:17:46,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-12T01:17:46,084 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:17:46,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-12T01:17:46,085 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:17:46,087 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:17:46,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741965_1141 (size=156) 2024-12-12T01:17:46,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741965_1141 (size=156) 2024-12-12T01:17:46,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741965_1141 (size=156) 2024-12-12T01:17:46,096 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:17:46,097 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2}] 2024-12-12T01:17:46,098 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:46,098 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:46,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-12T01:17:46,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-12T01:17:46,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 2024-12-12T01:17:46,251 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing 7c31809c67fcfd8c14b5f7d7fce44ac2 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-12T01:17:46,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-12T01:17:46,251 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 2024-12-12T01:17:46,251 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing 5edcb373fdf5909fc7a4f8d149a52280 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-12T01:17:46,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/.tmp/cf/25643f0fd89e4053a0043d95293d7dfe is 71, key is 024f3322caf82bcd22eec95c43f4f827/cf:q/1733966266040/Put/seqid=0 2024-12-12T01:17:46,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/.tmp/cf/48cc012447c24bb8ad6f381768691a92 is 71, key is 1a9ad53e896246c0603f12e6e402e5d9/cf:q/1733966266042/Put/seqid=0 2024-12-12T01:17:46,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741967_1143 (size=8326) 2024-12-12T01:17:46,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741967_1143 (size=8326) 2024-12-12T01:17:46,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741966_1142 (size=5288) 2024-12-12T01:17:46,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741967_1143 (size=8326) 2024-12-12T01:17:46,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741966_1142 (size=5288) 2024-12-12T01:17:46,291 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/.tmp/cf/48cc012447c24bb8ad6f381768691a92 2024-12-12T01:17:46,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741966_1142 (size=5288) 2024-12-12T01:17:46,292 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/.tmp/cf/25643f0fd89e4053a0043d95293d7dfe 2024-12-12T01:17:46,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/.tmp/cf/25643f0fd89e4053a0043d95293d7dfe as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/cf/25643f0fd89e4053a0043d95293d7dfe 2024-12-12T01:17:46,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/.tmp/cf/48cc012447c24bb8ad6f381768691a92 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/cf/48cc012447c24bb8ad6f381768691a92 2024-12-12T01:17:46,305 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/cf/25643f0fd89e4053a0043d95293d7dfe, entries=3, sequenceid=6, filesize=5.2 K 2024-12-12T01:17:46,306 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 5edcb373fdf5909fc7a4f8d149a52280 in 55ms, sequenceid=6, compaction requested=false 2024-12-12T01:17:46,306 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for 5edcb373fdf5909fc7a4f8d149a52280: 2024-12-12T01:17:46,306 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. for snaptb0-testExportWithResetTtl completed. 2024-12-12T01:17:46,306 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-12T01:17:46,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:17:46,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/cf/25643f0fd89e4053a0043d95293d7dfe] hfiles 2024-12-12T01:17:46,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/cf/25643f0fd89e4053a0043d95293d7dfe for snapshot=snaptb0-testExportWithResetTtl 2024-12-12T01:17:46,308 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/cf/48cc012447c24bb8ad6f381768691a92, entries=47, sequenceid=6, filesize=8.1 K 2024-12-12T01:17:46,310 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 7c31809c67fcfd8c14b5f7d7fce44ac2 in 60ms, sequenceid=6, compaction requested=false 2024-12-12T01:17:46,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for 7c31809c67fcfd8c14b5f7d7fce44ac2: 2024-12-12T01:17:46,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. for snaptb0-testExportWithResetTtl completed. 2024-12-12T01:17:46,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-12T01:17:46,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:17:46,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/cf/48cc012447c24bb8ad6f381768691a92] hfiles 2024-12-12T01:17:46,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/cf/48cc012447c24bb8ad6f381768691a92 for snapshot=snaptb0-testExportWithResetTtl 2024-12-12T01:17:46,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741968_1144 (size=107) 2024-12-12T01:17:46,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741968_1144 (size=107) 2024-12-12T01:17:46,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741968_1144 (size=107) 2024-12-12T01:17:46,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 
2024-12-12T01:17:46,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-12T01:17:46,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-12T01:17:46,320 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:46,320 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:17:46,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280 in 226 msec 2024-12-12T01:17:46,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741969_1145 (size=107) 2024-12-12T01:17:46,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741969_1145 (size=107) 2024-12-12T01:17:46,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741969_1145 (size=107) 2024-12-12T01:17:46,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 
2024-12-12T01:17:46,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-12T01:17:46,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-12T01:17:46,339 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:46,339 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:17:46,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-12-12T01:17:46,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2 in 244 msec 2024-12-12T01:17:46,342 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:17:46,343 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:17:46,344 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:17:46,344 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-12T01:17:46,345 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-12T01:17:46,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741970_1146 (size=621) 2024-12-12T01:17:46,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741970_1146 (size=621) 2024-12-12T01:17:46,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741970_1146 (size=621) 2024-12-12T01:17:46,362 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:17:46,371 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): 
pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:17:46,371 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-12T01:17:46,373 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:17:46,373 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-12T01:17:46,375 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 291 msec 2024-12-12T01:17:46,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-12T01:17:46,406 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-12T01:17:46,407 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:17:46,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-12T01:17:46,410 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:17:46,410 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:17:46,411 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-12T01:17:46,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=74 2024-12-12T01:17:46,412 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:17:46,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741971_1147 (size=397) 2024-12-12T01:17:46,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741971_1147 (size=397) 2024-12-12T01:17:46,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741971_1147 (size=397) 2024-12-12T01:17:46,428 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ac9cb343f69e85ec77ebbc33279d44e8, NAME => 'testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:17:46,429 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ca8c69b4db61e470ea5f1d9784c4d954, NAME => 'testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:17:46,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741972_1148 (size=58) 2024-12-12T01:17:46,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741972_1148 (size=58) 2024-12-12T01:17:46,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741972_1148 (size=58) 2024-12-12T01:17:46,444 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:17:46,444 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing ac9cb343f69e85ec77ebbc33279d44e8, disabling compactions & flushes 2024-12-12T01:17:46,444 INFO 
[RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 2024-12-12T01:17:46,444 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 2024-12-12T01:17:46,444 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. after waiting 0 ms 2024-12-12T01:17:46,444 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 2024-12-12T01:17:46,444 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 2024-12-12T01:17:46,444 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for ac9cb343f69e85ec77ebbc33279d44e8: Waiting for close lock at 1733966266444Disabling compacts and flushes for region at 1733966266444Disabling writes for close at 1733966266444Writing region close event to WAL at 1733966266444Closed at 1733966266444 2024-12-12T01:17:46,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741973_1149 (size=58) 2024-12-12T01:17:46,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741973_1149 (size=58) 2024-12-12T01:17:46,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741973_1149 (size=58) 2024-12-12T01:17:46,455 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:17:46,455 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing ca8c69b4db61e470ea5f1d9784c4d954, disabling compactions & flushes 2024-12-12T01:17:46,455 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 2024-12-12T01:17:46,455 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 2024-12-12T01:17:46,455 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. after waiting 0 ms 2024-12-12T01:17:46,455 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 2024-12-12T01:17:46,455 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 
2024-12-12T01:17:46,456 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for ca8c69b4db61e470ea5f1d9784c4d954: Waiting for close lock at 1733966266455Disabling compacts and flushes for region at 1733966266455Disabling writes for close at 1733966266455Writing region close event to WAL at 1733966266455Closed at 1733966266455 2024-12-12T01:17:46,457 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:17:46,458 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733966266457"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966266457"}]},"ts":"1733966266457"} 2024-12-12T01:17:46,458 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733966266457"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966266457"}]},"ts":"1733966266457"} 2024-12-12T01:17:46,461 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-12T01:17:46,462 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:17:46,462 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966266462"}]},"ts":"1733966266462"} 2024-12-12T01:17:46,467 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-12T01:17:46,468 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:17:46,469 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:17:46,469 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:17:46,469 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:17:46,469 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:17:46,469 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:17:46,469 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:17:46,470 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:17:46,470 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:17:46,470 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:17:46,470 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:17:46,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, 
region=ac9cb343f69e85ec77ebbc33279d44e8, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca8c69b4db61e470ea5f1d9784c4d954, ASSIGN}] 2024-12-12T01:17:46,471 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca8c69b4db61e470ea5f1d9784c4d954, ASSIGN 2024-12-12T01:17:46,471 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ac9cb343f69e85ec77ebbc33279d44e8, ASSIGN 2024-12-12T01:17:46,472 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ac9cb343f69e85ec77ebbc33279d44e8, ASSIGN; state=OFFLINE, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:17:46,472 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca8c69b4db61e470ea5f1d9784c4d954, ASSIGN; state=OFFLINE, location=36236b0d7605,44743,1733966101408; forceNewPlan=false, retain=false 2024-12-12T01:17:46,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-12T01:17:46,623 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-12T01:17:46,623 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=ca8c69b4db61e470ea5f1d9784c4d954, regionState=OPENING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:17:46,624 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=ac9cb343f69e85ec77ebbc33279d44e8, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:17:46,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca8c69b4db61e470ea5f1d9784c4d954, ASSIGN because future has completed 2024-12-12T01:17:46,627 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure ca8c69b4db61e470ea5f1d9784c4d954, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:17:46,628 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ac9cb343f69e85ec77ebbc33279d44e8, ASSIGN because future has completed 2024-12-12T01:17:46,629 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure ac9cb343f69e85ec77ebbc33279d44e8, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:17:46,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-12T01:17:46,784 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 2024-12-12T01:17:46,784 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => ca8c69b4db61e470ea5f1d9784c4d954, NAME => 'testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T01:17:46,785 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. service=AccessControlService 2024-12-12T01:17:46,785 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:17:46,785 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 
2024-12-12T01:17:46,786 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:46,786 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => ac9cb343f69e85ec77ebbc33279d44e8, NAME => 'testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T01:17:46,786 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:17:46,786 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:46,786 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:46,786 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. service=AccessControlService 2024-12-12T01:17:46,786 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:17:46,786 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:46,786 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:17:46,787 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:46,787 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:46,787 INFO [StoreOpener-ca8c69b4db61e470ea5f1d9784c4d954-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:46,788 INFO [StoreOpener-ac9cb343f69e85ec77ebbc33279d44e8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:46,789 INFO [StoreOpener-ca8c69b4db61e470ea5f1d9784c4d954-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ca8c69b4db61e470ea5f1d9784c4d954 columnFamilyName cf 2024-12-12T01:17:46,789 DEBUG [StoreOpener-ca8c69b4db61e470ea5f1d9784c4d954-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:17:46,790 INFO [StoreOpener-ac9cb343f69e85ec77ebbc33279d44e8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ac9cb343f69e85ec77ebbc33279d44e8 
columnFamilyName cf 2024-12-12T01:17:46,790 DEBUG [StoreOpener-ac9cb343f69e85ec77ebbc33279d44e8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:17:46,790 INFO [StoreOpener-ca8c69b4db61e470ea5f1d9784c4d954-1 {}] regionserver.HStore(327): Store=ca8c69b4db61e470ea5f1d9784c4d954/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:17:46,790 INFO [StoreOpener-ac9cb343f69e85ec77ebbc33279d44e8-1 {}] regionserver.HStore(327): Store=ac9cb343f69e85ec77ebbc33279d44e8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:17:46,790 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:46,790 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:46,791 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:46,791 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:46,791 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:46,792 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:46,792 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:46,792 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:46,792 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:46,792 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:46,794 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 
ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:46,794 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:46,798 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:17:46,798 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:17:46,798 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened ac9cb343f69e85ec77ebbc33279d44e8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62514076, jitterRate=-0.06846767663955688}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:17:46,799 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:46,799 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened ca8c69b4db61e470ea5f1d9784c4d954; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63585362, jitterRate=-0.052504271268844604}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:17:46,799 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:46,799 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for ca8c69b4db61e470ea5f1d9784c4d954: Running coprocessor pre-open hook at 1733966266786Writing region info on filesystem at 1733966266786Initializing all the Stores at 1733966266787 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966266787Cleaning up temporary data from old regions at 1733966266792 (+5 ms)Running coprocessor post-open hooks at 1733966266799 (+7 ms)Region opened successfully at 1733966266799 2024-12-12T01:17:46,799 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for ac9cb343f69e85ec77ebbc33279d44e8: Running coprocessor pre-open hook at 1733966266787Writing region info on filesystem at 1733966266787Initializing all the Stores at 1733966266787Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966266788 (+1 ms)Cleaning up temporary data from old regions at 1733966266792 (+4 ms)Running coprocessor post-open hooks at 1733966266799 (+7 ms)Region opened successfully at 1733966266799 2024-12-12T01:17:46,800 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954., pid=77, masterSystemTime=1733966266780 2024-12-12T01:17:46,800 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8., pid=78, masterSystemTime=1733966266782 2024-12-12T01:17:46,803 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 2024-12-12T01:17:46,803 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 2024-12-12T01:17:46,803 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=ac9cb343f69e85ec77ebbc33279d44e8, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:17:46,804 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 2024-12-12T01:17:46,804 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 2024-12-12T01:17:46,804 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=ca8c69b4db61e470ea5f1d9784c4d954, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:17:46,806 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=36236b0d7605,38295,1733966101221, table=testExportWithResetTtl, region=ac9cb343f69e85ec77ebbc33279d44e8. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-12T01:17:46,807 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=36236b0d7605,44743,1733966101408, table=testExportWithResetTtl, region=ca8c69b4db61e470ea5f1d9784c4d954. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-12T01:17:46,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure ac9cb343f69e85ec77ebbc33279d44e8, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:17:46,815 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure ca8c69b4db61e470ea5f1d9784c4d954, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:17:46,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=75 2024-12-12T01:17:46,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure ac9cb343f69e85ec77ebbc33279d44e8, server=36236b0d7605,38295,1733966101221 in 187 msec 2024-12-12T01:17:46,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-12-12T01:17:46,819 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure ca8c69b4db61e470ea5f1d9784c4d954, server=36236b0d7605,44743,1733966101408 in 190 msec 2024-12-12T01:17:46,819 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ac9cb343f69e85ec77ebbc33279d44e8, ASSIGN in 348 msec 2024-12-12T01:17:46,821 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=76, resume processing ppid=74 2024-12-12T01:17:46,821 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca8c69b4db61e470ea5f1d9784c4d954, ASSIGN in 349 msec 2024-12-12T01:17:46,821 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:17:46,821 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966266821"}]},"ts":"1733966266821"} 2024-12-12T01:17:46,823 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-12T01:17:46,824 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:17:46,824 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-12T01:17:46,827 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-12T01:17:46,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:46,829 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:46,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:46,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:17:46,836 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:46,836 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:46,836 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:46,836 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:46,837 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:46,837 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:46,837 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:46,837 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:17:46,838 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 427 msec 2024-12-12T01:17:47,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-12T01:17:47,035 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 
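The CREATE operation above completes with two regions (split at row '1') and a single 'cf' family with VERSIONS => '1'. A minimal client-side sketch that would produce an equivalent table, assuming a reachable cluster and the standard HBase 2.x+ client API; the class name is illustrative and not taken from the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testExportWithResetTtl");
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)   // matches VERSIONS => '1' in the region open journal above
                  .build())
              .build();
          // one split key gives the two regions ('' -> '1' and '1' -> '') seen in the log
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);
        }
      }
    }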
2024-12-12T01:17:47,035 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-12T01:17:47,035 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:17:47,038 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-12T01:17:47,039 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:17:47,039 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 2024-12-12T01:17:47,039 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-12T01:17:47,044 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='0a9df492778bfc3006de7cfc87533a526', locateType=CURRENT is [region=testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:17:47,045 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='16be59342342908eb05dc071a52276bcc', locateType=CURRENT is [region=testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:47,046 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='2c19185f78be1de02a40b49c828f23c2d', locateType=CURRENT is [region=testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:47,047 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='30970a81f00d69b1f82ddd0716f72c1c8', locateType=CURRENT is [region=testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:47,048 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='484f01185f65875613bc29aa8defdabd6', locateType=CURRENT is [region=testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:47,049 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='5e4b00aa984a795643dc85fd3c9750414', locateType=CURRENT is [region=testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:47,052 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. with WAL disabled. Data may be lost in the event of a crash. 
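The "writing data to region ... with WAL disabled" lines record the test loading rows before the snapshot is taken. A hedged sketch of that kind of write, assuming the standard Table/Put API; the row key is one that appears in the flush log later in this section, the value is an illustrative placeholder, and SKIP_WAL durability is the usual way a client write ends up with that warning:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LoadRowsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
          Put put = new Put(Bytes.toBytes("0a6fc2d4cfa877c790bac25975a1d1af"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // skipping the WAL is what the warning above cautions about: data may be lost on crash
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }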
2024-12-12T01:17:47,055 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44743 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:17:47,056 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-12T01:17:47,059 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-12T01:17:47,059 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 2024-12-12T01:17:47,059 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:17:47,061 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-12T01:17:47,065 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-12T01:17:47,071 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-12T01:17:47,074 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-12T01:17:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966267074 (current time:1733966267074). 
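At this point the master has received a snapshot request of type FLUSH with a TTL for snaptb-testExportWithResetTtl. A minimal sketch of issuing such a request through the Admin API, assuming an HBase 2.x+ client; the ttl=100000 seen in the request is supplied via snapshot properties in releases that support it and is omitted here to keep the sketch to calls that certainly exist:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot: each region flushes its memstore first (the "Flushing ..."
          // lines under the SnapshotRegionProcedure below), then its hfiles are referenced.
          admin.snapshot(new SnapshotDescription(
              "snaptb-testExportWithResetTtl",
              TableName.valueOf("testExportWithResetTtl"),
              SnapshotType.FLUSH));
        }
      }
    }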
2024-12-12T01:17:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-12T01:17:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:17:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@506d0789, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:47,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:17:47,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:17:47,075 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:17:47,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:17:47,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:17:47,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a727c11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:47,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:17:47,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:17:47,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:47,077 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43680, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:17:47,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c0f852f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:47,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:17:47,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:17:47,079 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:17:47,080 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52398, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:17:47,081 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:17:47,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:17:47,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:47,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:47,081 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:17:47,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11a8dab9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:47,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:17:47,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:17:47,083 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:17:47,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:17:47,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:17:47,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49f87eae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:47,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:17:47,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:17:47,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:47,084 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43704, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:17:47,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20941b12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:17:47,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:17:47,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:17:47,086 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:17:47,087 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52408, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-12T01:17:47,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:17:47,090 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:17:47,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:17:47,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:47,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:17:47,090 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:17:47,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-12T01:17:47,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:17:47,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-12T01:17:47,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-12T01:17:47,093 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:17:47,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-12T01:17:47,094 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:17:47,096 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:17:47,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741974_1150 (size=143) 2024-12-12T01:17:47,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741974_1150 (size=143) 2024-12-12T01:17:47,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741974_1150 (size=143) 2024-12-12T01:17:47,104 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:17:47,104 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac9cb343f69e85ec77ebbc33279d44e8}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ca8c69b4db61e470ea5f1d9784c4d954}] 2024-12-12T01:17:47,105 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:47,105 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:47,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-12T01:17:47,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-12T01:17:47,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-12T01:17:47,257 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 2024-12-12T01:17:47,257 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 2024-12-12T01:17:47,258 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing ac9cb343f69e85ec77ebbc33279d44e8 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-12T01:17:47,258 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing ca8c69b4db61e470ea5f1d9784c4d954 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-12T01:17:47,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/.tmp/cf/36b1e0f1189e439d99b9326a7043b731 is 71, key is 0a6fc2d4cfa877c790bac25975a1d1af/cf:q/1733966267052/Put/seqid=0 2024-12-12T01:17:47,284 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/.tmp/cf/6ca8b8f6a1b74f00bb736ea8afb62f2d is 71, key is 1b344ae618c6c54dae35221b56cd1768/cf:q/1733966267055/Put/seqid=0 2024-12-12T01:17:47,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741975_1151 (size=5286) 2024-12-12T01:17:47,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741975_1151 (size=5286) 2024-12-12T01:17:47,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741975_1151 (size=5286) 2024-12-12T01:17:47,297 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/.tmp/cf/36b1e0f1189e439d99b9326a7043b731 2024-12-12T01:17:47,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741976_1152 (size=8326) 2024-12-12T01:17:47,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741976_1152 (size=8326) 2024-12-12T01:17:47,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741976_1152 (size=8326) 2024-12-12T01:17:47,304 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/.tmp/cf/6ca8b8f6a1b74f00bb736ea8afb62f2d 2024-12-12T01:17:47,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/.tmp/cf/36b1e0f1189e439d99b9326a7043b731 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/cf/36b1e0f1189e439d99b9326a7043b731 2024-12-12T01:17:47,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/.tmp/cf/6ca8b8f6a1b74f00bb736ea8afb62f2d as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/cf/6ca8b8f6a1b74f00bb736ea8afb62f2d 2024-12-12T01:17:47,312 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/cf/36b1e0f1189e439d99b9326a7043b731, entries=3, sequenceid=5, filesize=5.2 K 2024-12-12T01:17:47,314 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for ac9cb343f69e85ec77ebbc33279d44e8 in 56ms, sequenceid=5, compaction requested=false 2024-12-12T01:17:47,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-12T01:17:47,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for ac9cb343f69e85ec77ebbc33279d44e8: 
2024-12-12T01:17:47,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. for snaptb-testExportWithResetTtl completed. 2024-12-12T01:17:47,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-12T01:17:47,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:17:47,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/cf/36b1e0f1189e439d99b9326a7043b731] hfiles 2024-12-12T01:17:47,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/cf/36b1e0f1189e439d99b9326a7043b731 for snapshot=snaptb-testExportWithResetTtl 2024-12-12T01:17:47,320 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/cf/6ca8b8f6a1b74f00bb736ea8afb62f2d, entries=47, sequenceid=5, filesize=8.1 K 2024-12-12T01:17:47,321 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for ca8c69b4db61e470ea5f1d9784c4d954 in 63ms, sequenceid=5, compaction requested=false 2024-12-12T01:17:47,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for ca8c69b4db61e470ea5f1d9784c4d954: 2024-12-12T01:17:47,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. for snaptb-testExportWithResetTtl completed. 2024-12-12T01:17:47,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-12T01:17:47,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:17:47,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/cf/6ca8b8f6a1b74f00bb736ea8afb62f2d] hfiles 2024-12-12T01:17:47,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/cf/6ca8b8f6a1b74f00bb736ea8afb62f2d for snapshot=snaptb-testExportWithResetTtl 2024-12-12T01:17:47,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741977_1153 (size=100) 2024-12-12T01:17:47,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741977_1153 (size=100) 2024-12-12T01:17:47,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741977_1153 (size=100) 2024-12-12T01:17:47,331 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 
2024-12-12T01:17:47,331 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-12T01:17:47,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-12T01:17:47,332 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:47,332 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:17:47,335 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ac9cb343f69e85ec77ebbc33279d44e8 in 229 msec 2024-12-12T01:17:47,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741978_1154 (size=100) 2024-12-12T01:17:47,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741978_1154 (size=100) 2024-12-12T01:17:47,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741978_1154 (size=100) 2024-12-12T01:17:47,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 
2024-12-12T01:17:47,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-12T01:17:47,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-12T01:17:47,349 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:47,350 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:17:47,353 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=81, resume processing ppid=79 2024-12-12T01:17:47,353 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:17:47,353 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ca8c69b4db61e470ea5f1d9784c4d954 in 246 msec 2024-12-12T01:17:47,354 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:17:47,354 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:17:47,354 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-12T01:17:47,355 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-12T01:17:47,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741979_1155 (size=600) 2024-12-12T01:17:47,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741979_1155 (size=600) 2024-12-12T01:17:47,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741979_1155 (size=600) 2024-12-12T01:17:47,376 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:17:47,382 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:17:47,383 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-12T01:17:47,384 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:17:47,384 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-12T01:17:47,385 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 293 msec 2024-12-12T01:17:47,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-12T01:17:47,415 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-12T01:17:47,426 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966267426 2024-12-12T01:17:47,426 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33353, tgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966267426, rawTgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966267426, srcFsUri=hdfs://localhost:33353, srcDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:17:47,473 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33353, inputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:17:47,473 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966267426, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966267426/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-12T01:17:47,475 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
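From here the test drives ExportSnapshot to copy the snapshot manifest and hfiles into the export-test directory shown above. A hedged sketch of invoking the same tool programmatically, assuming ExportSnapshot can be run as a Hadoop Tool (it is in current HBase); the target path in the sketch is illustrative, not the test's path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExportSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent CLI: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb-testExportWithResetTtl -copy-to <target hdfs dir>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://localhost:33353/user/jenkins/export-target"  // illustrative target
        });
        System.exit(rc);
      }
    }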
2024-12-12T01:17:47,481 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966267426/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-12T01:17:47,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741980_1156 (size=143) 2024-12-12T01:17:47,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741980_1156 (size=143) 2024-12-12T01:17:47,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741981_1157 (size=600) 2024-12-12T01:17:47,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741980_1156 (size=143) 2024-12-12T01:17:47,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741981_1157 (size=600) 2024-12-12T01:17:47,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741981_1157 (size=600) 2024-12-12T01:17:47,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741982_1158 (size=141) 2024-12-12T01:17:47,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741982_1158 (size=141) 2024-12-12T01:17:47,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741982_1158 (size=141) 2024-12-12T01:17:47,907 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:47,908 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:47,908 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:49,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-1549732054229645485.jar 2024-12-12T01:17:49,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:49,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:49,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-2031202699340211551.jar 2024-12-12T01:17:49,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:49,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:49,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:49,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:49,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:49,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:17:49,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T01:17:49,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T01:17:49,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T01:17:49,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 
2024-12-12T01:17:49,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T01:17:49,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T01:17:49,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T01:17:49,187 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T01:17:49,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T01:17:49,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T01:17:49,188 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T01:17:49,189 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:17:49,189 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:17:49,189 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:17:49,189 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:17:49,190 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-12T01:17:49,190 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:17:49,190 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:17:49,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741983_1159 (size=131440) 2024-12-12T01:17:49,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741983_1159 (size=131440) 2024-12-12T01:17:49,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741983_1159 (size=131440) 2024-12-12T01:17:49,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741984_1160 (size=4188619) 2024-12-12T01:17:49,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741984_1160 (size=4188619) 2024-12-12T01:17:49,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741984_1160 (size=4188619) 2024-12-12T01:17:49,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741985_1161 (size=1323991) 2024-12-12T01:17:49,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741985_1161 (size=1323991) 2024-12-12T01:17:49,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741985_1161 (size=1323991) 2024-12-12T01:17:49,457 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0002_000001 (auth:SIMPLE) from 127.0.0.1:42834 2024-12-12T01:17:49,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741986_1162 (size=903541) 2024-12-12T01:17:49,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741986_1162 (size=903541) 2024-12-12T01:17:49,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741986_1162 (size=903541) 2024-12-12T01:17:49,501 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0002/container_1733966109028_0002_01_000001/launch_container.sh] 2024-12-12T01:17:49,501 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0002/container_1733966109028_0002_01_000001/container_tokens] 2024-12-12T01:17:49,501 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0002/container_1733966109028_0002_01_000001/sysfs] 2024-12-12T01:17:49,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741987_1163 (size=8360360) 2024-12-12T01:17:49,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741987_1163 (size=8360360) 2024-12-12T01:17:49,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741987_1163 (size=8360360) 2024-12-12T01:17:49,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741988_1164 (size=443172) 2024-12-12T01:17:49,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741988_1164 (size=443172) 2024-12-12T01:17:49,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741988_1164 (size=443172) 2024-12-12T01:17:49,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741989_1165 (size=1877034) 2024-12-12T01:17:49,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741989_1165 (size=1877034) 2024-12-12T01:17:49,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741989_1165 (size=1877034) 2024-12-12T01:17:49,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741990_1166 (size=77835) 2024-12-12T01:17:49,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741990_1166 (size=77835) 2024-12-12T01:17:49,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741990_1166 (size=77835) 2024-12-12T01:17:49,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741991_1167 (size=30949) 2024-12-12T01:17:49,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741991_1167 (size=30949) 2024-12-12T01:17:49,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741991_1167 (size=30949) 2024-12-12T01:17:49,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741992_1168 (size=1597213) 
2024-12-12T01:17:49,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741992_1168 (size=1597213) 2024-12-12T01:17:49,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741992_1168 (size=1597213) 2024-12-12T01:17:50,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741993_1169 (size=6426224) 2024-12-12T01:17:50,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741993_1169 (size=6426224) 2024-12-12T01:17:50,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741993_1169 (size=6426224) 2024-12-12T01:17:50,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741994_1170 (size=4695811) 2024-12-12T01:17:50,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741994_1170 (size=4695811) 2024-12-12T01:17:50,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741994_1170 (size=4695811) 2024-12-12T01:17:50,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741995_1171 (size=232957) 2024-12-12T01:17:50,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741995_1171 (size=232957) 2024-12-12T01:17:50,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741995_1171 (size=232957) 2024-12-12T01:17:50,306 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:17:50,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741996_1172 (size=127628) 2024-12-12T01:17:50,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741996_1172 (size=127628) 2024-12-12T01:17:50,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741996_1172 (size=127628) 2024-12-12T01:17:50,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741997_1173 (size=20406) 2024-12-12T01:17:50,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741997_1173 (size=20406) 2024-12-12T01:17:50,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741997_1173 (size=20406) 2024-12-12T01:17:50,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741998_1174 (size=5175431) 2024-12-12T01:17:50,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to 
blk_1073741998_1174 (size=5175431) 2024-12-12T01:17:50,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741998_1174 (size=5175431) 2024-12-12T01:17:50,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741999_1175 (size=217634) 2024-12-12T01:17:50,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741999_1175 (size=217634) 2024-12-12T01:17:50,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741999_1175 (size=217634) 2024-12-12T01:17:50,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742000_1176 (size=1832290) 2024-12-12T01:17:50,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742000_1176 (size=1832290) 2024-12-12T01:17:50,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742000_1176 (size=1832290) 2024-12-12T01:17:50,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742001_1177 (size=322274) 2024-12-12T01:17:50,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742001_1177 (size=322274) 2024-12-12T01:17:50,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742001_1177 (size=322274) 2024-12-12T01:17:50,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742002_1178 (size=503880) 2024-12-12T01:17:50,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742002_1178 (size=503880) 2024-12-12T01:17:50,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742002_1178 (size=503880) 2024-12-12T01:17:50,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742003_1179 (size=29229) 2024-12-12T01:17:50,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742003_1179 (size=29229) 2024-12-12T01:17:50,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742003_1179 (size=29229) 2024-12-12T01:17:50,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742004_1180 (size=24096) 2024-12-12T01:17:50,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742004_1180 (size=24096) 2024-12-12T01:17:50,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742004_1180 (size=24096) 2024-12-12T01:17:50,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is 
added to blk_1073742005_1181 (size=111872) 2024-12-12T01:17:50,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742005_1181 (size=111872) 2024-12-12T01:17:50,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742005_1181 (size=111872) 2024-12-12T01:17:50,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742006_1182 (size=45609) 2024-12-12T01:17:50,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742006_1182 (size=45609) 2024-12-12T01:17:50,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742006_1182 (size=45609) 2024-12-12T01:17:50,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742007_1183 (size=136454) 2024-12-12T01:17:50,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742007_1183 (size=136454) 2024-12-12T01:17:50,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742007_1183 (size=136454) 2024-12-12T01:17:50,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-12T01:17:50,712 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-12T01:17:50,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-12T01:17:50,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-12T01:17:50,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-12T01:17:51,079 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
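Annotation: the JobResourceUploader warning just above ("No job jar file set. User classes may not be found.") is Hadoop's standard hint that the submitting code never called Job#setJar or Job#setJarByClass; in this run the needed classes are instead shipped via the dependency jars that TableMapReduceUtil resolved in the earlier "For class X, using jar Y" entries. A minimal sketch of the two usual remedies follows; the driver class name is hypothetical, and only the Job and TableMapReduceUtil calls are the real APIs involved.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch");

    // Remedy 1: point the job at the jar containing the user classes, which
    // avoids the "No job jar file set" warning from JobResourceUploader.
    job.setJarByClass(JobJarSetupSketch.class);

    // Remedy 2: ship HBase and its transitive dependencies to the cluster via
    // tmpjars, the same mechanism behind the TableMapReduceUtil entries above.
    TableMapReduceUtil.addDependencyJars(job);

    // Show what was resolved; a real driver would configure mapper, reducer,
    // input and output paths here before calling job.waitForCompletion(true).
    System.out.println(job.getConfiguration().get("tmpjars"));
  }
}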
2024-12-12T01:17:51,082 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-12T01:17:51,085 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-12T01:17:51,085 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-12T01:17:51,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742008_1184 (size=427) 2024-12-12T01:17:51,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742008_1184 (size=427) 2024-12-12T01:17:51,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742008_1184 (size=427) 2024-12-12T01:17:51,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742009_1185 (size=21) 2024-12-12T01:17:51,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742009_1185 (size=21) 2024-12-12T01:17:51,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742009_1185 (size=21) 2024-12-12T01:17:51,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742010_1186 (size=303994) 2024-12-12T01:17:51,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742010_1186 (size=303994) 2024-12-12T01:17:51,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742010_1186 (size=303994) 2024-12-12T01:17:51,552 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:17:51,552 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:17:52,248 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0003_000001 (auth:SIMPLE) from 127.0.0.1:36574 2024-12-12T01:17:56,216 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:17:58,909 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-12T01:18:00,748 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0003_000001 (auth:SIMPLE) from 127.0.0.1:43876 2024-12-12T01:18:01,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742011_1187 (size=349692) 2024-12-12T01:18:01,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742011_1187 (size=349692) 2024-12-12T01:18:01,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742011_1187 (size=349692) 2024-12-12T01:18:03,265 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0003_000001 (auth:SIMPLE) from 127.0.0.1:52792 2024-12-12T01:18:03,270 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0003_000001 (auth:SIMPLE) from 127.0.0.1:48176 2024-12-12T01:18:04,742 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5edcb373fdf5909fc7a4f8d149a52280 changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:18:04,742 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ac9cb343f69e85ec77ebbc33279d44e8 changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:18:04,742 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ca8c69b4db61e470ea5f1d9784c4d954 changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:18:04,742 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7c31809c67fcfd8c14b5f7d7fce44ac2 changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:18:08,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742012_1188 (size=8326) 2024-12-12T01:18:08,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742012_1188 (size=8326) 2024-12-12T01:18:08,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742012_1188 (size=8326) 2024-12-12T01:18:08,447 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_1/usercache/jenkins/appcache/application_1733966109028_0003/container_1733966109028_0003_01_000002/launch_container.sh] 2024-12-12T01:18:08,447 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_1/usercache/jenkins/appcache/application_1733966109028_0003/container_1733966109028_0003_01_000002/container_tokens] 2024-12-12T01:18:08,447 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_1/usercache/jenkins/appcache/application_1733966109028_0003/container_1733966109028_0003_01_000002/sysfs] 2024-12-12T01:18:09,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742014_1190 (size=5286) 2024-12-12T01:18:09,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742014_1190 (size=5286) 2024-12-12T01:18:09,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742014_1190 (size=5286) 2024-12-12T01:18:09,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742013_1189 (size=22124) 2024-12-12T01:18:09,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742013_1189 (size=22124) 2024-12-12T01:18:09,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742013_1189 (size=22124) 2024-12-12T01:18:09,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742015_1191 (size=462) 2024-12-12T01:18:09,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742015_1191 (size=462) 2024-12-12T01:18:09,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742015_1191 (size=462) 2024-12-12T01:18:09,525 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0003/container_1733966109028_0003_01_000003/launch_container.sh] 2024-12-12T01:18:09,525 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0003/container_1733966109028_0003_01_000003/container_tokens] 2024-12-12T01:18:09,525 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0003/container_1733966109028_0003_01_000003/sysfs] 2024-12-12T01:18:09,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742016_1192 (size=22124) 2024-12-12T01:18:09,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742016_1192 (size=22124) 2024-12-12T01:18:09,526 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742016_1192 (size=22124) 2024-12-12T01:18:09,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742017_1193 (size=349692) 2024-12-12T01:18:09,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742017_1193 (size=349692) 2024-12-12T01:18:09,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742017_1193 (size=349692) 2024-12-12T01:18:09,578 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0003_000001 (auth:SIMPLE) from 127.0.0.1:52804 2024-12-12T01:18:10,957 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-12T01:18:10,958 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-12T01:18:10,965 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-12-12T01:18:10,965 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-12T01:18:10,966 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-12T01:18:10,966 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-12T01:18:10,967 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-12T01:18:10,967 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-12T01:18:10,967 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966267426/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966267426/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-12T01:18:10,967 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966267426/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-12T01:18:10,967 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966267426/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-12T01:18:10,975 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$13(2820): 
Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-12T01:18:10,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-12-12T01:18:10,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-12T01:18:10,978 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966290978"}]},"ts":"1733966290978"} 2024-12-12T01:18:10,980 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-12T01:18:10,981 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-12T01:18:10,981 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-12T01:18:10,983 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ac9cb343f69e85ec77ebbc33279d44e8, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca8c69b4db61e470ea5f1d9784c4d954, UNASSIGN}] 2024-12-12T01:18:10,984 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca8c69b4db61e470ea5f1d9784c4d954, UNASSIGN 2024-12-12T01:18:10,984 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ac9cb343f69e85ec77ebbc33279d44e8, UNASSIGN 2024-12-12T01:18:10,985 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=ca8c69b4db61e470ea5f1d9784c4d954, regionState=CLOSING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:18:10,985 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=ac9cb343f69e85ec77ebbc33279d44e8, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:18:10,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca8c69b4db61e470ea5f1d9784c4d954, UNASSIGN because future has completed 2024-12-12T01:18:10,987 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:18:10,987 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure ca8c69b4db61e470ea5f1d9784c4d954, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:18:10,988 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ac9cb343f69e85ec77ebbc33279d44e8, UNASSIGN because future has completed 2024-12-12T01:18:10,988 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:18:10,988 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure ac9cb343f69e85ec77ebbc33279d44e8, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:18:11,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-12T01:18:11,139 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:18:11,139 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:18:11,139 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing ca8c69b4db61e470ea5f1d9784c4d954, disabling compactions & flushes 2024-12-12T01:18:11,139 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 2024-12-12T01:18:11,139 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 2024-12-12T01:18:11,140 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. after waiting 0 ms 2024-12-12T01:18:11,140 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 2024-12-12T01:18:11,141 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:18:11,141 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:18:11,141 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing ac9cb343f69e85ec77ebbc33279d44e8, disabling compactions & flushes 2024-12-12T01:18:11,142 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 
2024-12-12T01:18:11,142 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 2024-12-12T01:18:11,142 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. after waiting 0 ms 2024-12-12T01:18:11,142 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 2024-12-12T01:18:11,144 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T01:18:11,145 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:18:11,145 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954. 2024-12-12T01:18:11,145 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for ca8c69b4db61e470ea5f1d9784c4d954: Waiting for close lock at 1733966291139Running coprocessor pre-close hooks at 1733966291139Disabling compacts and flushes for region at 1733966291139Disabling writes for close at 1733966291140 (+1 ms)Writing region close event to WAL at 1733966291140Running coprocessor post-close hooks at 1733966291145 (+5 ms)Closed at 1733966291145 2024-12-12T01:18:11,146 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T01:18:11,146 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:18:11,146 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8. 
2024-12-12T01:18:11,147 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for ac9cb343f69e85ec77ebbc33279d44e8: Waiting for close lock at 1733966291141Running coprocessor pre-close hooks at 1733966291141Disabling compacts and flushes for region at 1733966291141Disabling writes for close at 1733966291142 (+1 ms)Writing region close event to WAL at 1733966291142Running coprocessor post-close hooks at 1733966291146 (+4 ms)Closed at 1733966291146 2024-12-12T01:18:11,147 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:18:11,147 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=ca8c69b4db61e470ea5f1d9784c4d954, regionState=CLOSED 2024-12-12T01:18:11,148 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:18:11,149 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=ac9cb343f69e85ec77ebbc33279d44e8, regionState=CLOSED 2024-12-12T01:18:11,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure ca8c69b4db61e470ea5f1d9784c4d954, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:18:11,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure ac9cb343f69e85ec77ebbc33279d44e8, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:18:11,152 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=85 2024-12-12T01:18:11,152 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure ca8c69b4db61e470ea5f1d9784c4d954, server=36236b0d7605,44743,1733966101408 in 163 msec 2024-12-12T01:18:11,153 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=84 2024-12-12T01:18:11,153 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca8c69b4db61e470ea5f1d9784c4d954, UNASSIGN in 169 msec 2024-12-12T01:18:11,153 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure ac9cb343f69e85ec77ebbc33279d44e8, server=36236b0d7605,38295,1733966101221 in 164 msec 2024-12-12T01:18:11,155 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=84, resume processing ppid=83 2024-12-12T01:18:11,155 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ac9cb343f69e85ec77ebbc33279d44e8, UNASSIGN in 170 msec 2024-12-12T01:18:11,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-12T01:18:11,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, 
state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 174 msec 2024-12-12T01:18:11,158 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966291158"}]},"ts":"1733966291158"} 2024-12-12T01:18:11,160 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-12T01:18:11,160 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-12T01:18:11,162 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 185 msec 2024-12-12T01:18:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-12T01:18:11,295 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-12T01:18:11,295 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-12T01:18:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-12T01:18:11,298 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-12T01:18:11,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-12T01:18:11,298 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-12T01:18:11,302 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-12T01:18:11,304 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:18:11,304 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:18:11,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T01:18:11,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T01:18:11,305 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T01:18:11,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T01:18:11,306 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-12T01:18:11,306 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-12T01:18:11,306 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-12T01:18:11,306 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-12T01:18:11,308 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/recovered.edits] 2024-12-12T01:18:11,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T01:18:11,308 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/recovered.edits] 2024-12-12T01:18:11,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T01:18:11,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T01:18:11,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:11,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:11,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-12T01:18:11,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:11,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:11,309 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:18:11,309 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:18:11,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-12T01:18:11,310 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:18:11,311 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-12T01:18:11,314 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/cf/36b1e0f1189e439d99b9326a7043b731 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/cf/36b1e0f1189e439d99b9326a7043b731 2024-12-12T01:18:11,315 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/cf/6ca8b8f6a1b74f00bb736ea8afb62f2d to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/cf/6ca8b8f6a1b74f00bb736ea8afb62f2d 2024-12-12T01:18:11,318 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/recovered.edits/8.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8/recovered.edits/8.seqid 2024-12-12T01:18:11,319 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ac9cb343f69e85ec77ebbc33279d44e8 2024-12-12T01:18:11,319 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/recovered.edits/8.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954/recovered.edits/8.seqid 2024-12-12T01:18:11,319 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportWithResetTtl/ca8c69b4db61e470ea5f1d9784c4d954 2024-12-12T01:18:11,320 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-12T01:18:11,322 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-12T01:18:11,326 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-12T01:18:11,331 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testExportWithResetTtl' descriptor. 2024-12-12T01:18:11,332 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-12T01:18:11,332 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testExportWithResetTtl' from region states. 2024-12-12T01:18:11,332 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966291332"}]},"ts":"9223372036854775807"} 2024-12-12T01:18:11,333 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966291332"}]},"ts":"9223372036854775807"} 2024-12-12T01:18:11,336 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-12T01:18:11,336 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ac9cb343f69e85ec77ebbc33279d44e8, NAME => 'testExportWithResetTtl,,1733966266407.ac9cb343f69e85ec77ebbc33279d44e8.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ca8c69b4db61e470ea5f1d9784c4d954, NAME => 'testExportWithResetTtl,1,1733966266407.ca8c69b4db61e470ea5f1d9784c4d954.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T01:18:11,336 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testExportWithResetTtl' as deleted. 
2024-12-12T01:18:11,336 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966291336"}]},"ts":"9223372036854775807"} 2024-12-12T01:18:11,339 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-12T01:18:11,340 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-12T01:18:11,341 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 45 msec 2024-12-12T01:18:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-12T01:18:11,415 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-12T01:18:11,415 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-12T01:18:11,416 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-12T01:18:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T01:18:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-12T01:18:11,420 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966291420"}]},"ts":"1733966291420"} 2024-12-12T01:18:11,422 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-12T01:18:11,422 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-12T01:18:11,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-12T01:18:11,424 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=5edcb373fdf5909fc7a4f8d149a52280, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7c31809c67fcfd8c14b5f7d7fce44ac2, UNASSIGN}] 2024-12-12T01:18:11,425 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7c31809c67fcfd8c14b5f7d7fce44ac2, UNASSIGN 2024-12-12T01:18:11,425 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=5edcb373fdf5909fc7a4f8d149a52280, UNASSIGN 2024-12-12T01:18:11,426 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=7c31809c67fcfd8c14b5f7d7fce44ac2, regionState=CLOSING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:18:11,426 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=5edcb373fdf5909fc7a4f8d149a52280, regionState=CLOSING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:18:11,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7c31809c67fcfd8c14b5f7d7fce44ac2, UNASSIGN because future has completed 2024-12-12T01:18:11,428 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:18:11,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:18:11,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=5edcb373fdf5909fc7a4f8d149a52280, UNASSIGN because future has completed 2024-12-12T01:18:11,429 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:18:11,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:18:11,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-12T01:18:11,581 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:18:11,581 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:18:11,581 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing 7c31809c67fcfd8c14b5f7d7fce44ac2, disabling compactions & flushes 2024-12-12T01:18:11,581 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 2024-12-12T01:18:11,581 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 
2024-12-12T01:18:11,581 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. after waiting 0 ms 2024-12-12T01:18:11,581 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 2024-12-12T01:18:11,583 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:18:11,583 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:18:11,583 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing 5edcb373fdf5909fc7a4f8d149a52280, disabling compactions & flushes 2024-12-12T01:18:11,583 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 2024-12-12T01:18:11,583 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 2024-12-12T01:18:11,583 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. after waiting 0 ms 2024-12-12T01:18:11,583 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 2024-12-12T01:18:11,586 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:18:11,587 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:18:11,587 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2. 
2024-12-12T01:18:11,587 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for 7c31809c67fcfd8c14b5f7d7fce44ac2: Waiting for close lock at 1733966291581Running coprocessor pre-close hooks at 1733966291581Disabling compacts and flushes for region at 1733966291581Disabling writes for close at 1733966291581Writing region close event to WAL at 1733966291582 (+1 ms)Running coprocessor post-close hooks at 1733966291587 (+5 ms)Closed at 1733966291587 2024-12-12T01:18:11,588 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:18:11,589 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:18:11,589 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280. 2024-12-12T01:18:11,589 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for 5edcb373fdf5909fc7a4f8d149a52280: Waiting for close lock at 1733966291583Running coprocessor pre-close hooks at 1733966291583Disabling compacts and flushes for region at 1733966291583Disabling writes for close at 1733966291583Writing region close event to WAL at 1733966291585 (+2 ms)Running coprocessor post-close hooks at 1733966291589 (+4 ms)Closed at 1733966291589 2024-12-12T01:18:11,590 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed 7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:18:11,590 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=7c31809c67fcfd8c14b5f7d7fce44ac2, regionState=CLOSED 2024-12-12T01:18:11,592 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed 5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:18:11,592 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=5edcb373fdf5909fc7a4f8d149a52280, regionState=CLOSED 2024-12-12T01:18:11,592 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7c31809c67fcfd8c14b5f7d7fce44ac2, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:18:11,594 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:18:11,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-12-12T01:18:11,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure 
7c31809c67fcfd8c14b5f7d7fce44ac2, server=36236b0d7605,44743,1733966101408 in 165 msec 2024-12-12T01:18:11,597 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=7c31809c67fcfd8c14b5f7d7fce44ac2, UNASSIGN in 171 msec 2024-12-12T01:18:11,598 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-12-12T01:18:11,598 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure 5edcb373fdf5909fc7a4f8d149a52280, server=36236b0d7605,33883,1733966101479 in 167 msec 2024-12-12T01:18:11,600 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=90 2024-12-12T01:18:11,600 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=5edcb373fdf5909fc7a4f8d149a52280, UNASSIGN in 174 msec 2024-12-12T01:18:11,602 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-12T01:18:11,602 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 179 msec 2024-12-12T01:18:11,604 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966291604"}]},"ts":"1733966291604"} 2024-12-12T01:18:11,606 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-12T01:18:11,606 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-12T01:18:11,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 190 msec 2024-12-12T01:18:11,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-12T01:18:11,736 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-12T01:18:11,737 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-12T01:18:11,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T01:18:11,739 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T01:18:11,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-12T01:18:11,740 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T01:18:11,742 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-12T01:18:11,744 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:18:11,744 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:18:11,747 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/recovered.edits] 2024-12-12T01:18:11,747 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/recovered.edits] 2024-12-12T01:18:11,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T01:18:11,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T01:18:11,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T01:18:11,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T01:18:11,748 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-12T01:18:11,748 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-12T01:18:11,748 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-12T01:18:11,748 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithResetTtl with data PBUF 2024-12-12T01:18:11,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T01:18:11,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T01:18:11,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T01:18:11,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:11,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-12T01:18:11,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:11,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-12T01:18:11,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:11,753 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/cf/25643f0fd89e4053a0043d95293d7dfe to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/cf/25643f0fd89e4053a0043d95293d7dfe 2024-12-12T01:18:11,753 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/cf/48cc012447c24bb8ad6f381768691a92 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/cf/48cc012447c24bb8ad6f381768691a92 2024-12-12T01:18:11,762 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2/recovered.edits/9.seqid 2024-12-12T01:18:11,762 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280/recovered.edits/9.seqid 2024-12-12T01:18:11,764 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/7c31809c67fcfd8c14b5f7d7fce44ac2 2024-12-12T01:18:11,764 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithResetTtl/5edcb373fdf5909fc7a4f8d149a52280 2024-12-12T01:18:11,764 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-12T01:18:11,767 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T01:18:11,773 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-12T01:18:11,776 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-12T01:18:11,777 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T01:18:11,777 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithResetTtl' from region states. 
2024-12-12T01:18:11,778 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966291777"}]},"ts":"9223372036854775807"} 2024-12-12T01:18:11,778 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966291777"}]},"ts":"9223372036854775807"} 2024-12-12T01:18:11,780 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-12T01:18:11,780 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5edcb373fdf5909fc7a4f8d149a52280, NAME => 'testtb-testExportWithResetTtl,,1733966265045.5edcb373fdf5909fc7a4f8d149a52280.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 7c31809c67fcfd8c14b5f7d7fce44ac2, NAME => 'testtb-testExportWithResetTtl,1,1733966265045.7c31809c67fcfd8c14b5f7d7fce44ac2.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T01:18:11,780 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-12T01:18:11,780 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966291780"}]},"ts":"9223372036854775807"} 2024-12-12T01:18:11,783 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-12T01:18:11,783 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-12T01:18:11,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 46 msec 2024-12-12T01:18:11,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-12T01:18:11,856 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-12T01:18:11,856 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-12T01:18:11,867 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-12T01:18:11,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-12T01:18:11,872 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-12T01:18:11,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-12T01:18:11,875 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: 
"snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-12T01:18:11,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-12T01:18:11,902 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=792 (was 784) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:33060 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:34899 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:46615 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 23530) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) 
java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:49826 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_24606686_1 at /127.0.0.1:49790 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2965 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46615 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:60660 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=790 (was 782) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=702 (was 630) - SystemLoadAverage LEAK? 
-, ProcessCount=14 (was 14), AvailableMemoryMB=6662 (was 6943) 2024-12-12T01:18:11,902 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-12-12T01:18:11,925 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=792, OpenFileDescriptor=790, MaxFileDescriptor=1048576, SystemLoadAverage=702, ProcessCount=14, AvailableMemoryMB=6657 2024-12-12T01:18:11,925 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-12-12T01:18:11,927 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:18:11,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-12T01:18:11,929 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:18:11,929 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:18:11,929 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-12T01:18:11,930 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:18:11,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-12T01:18:11,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742018_1194 (size=407) 2024-12-12T01:18:11,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742018_1194 (size=407) 2024-12-12T01:18:11,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742018_1194 (size=407) 2024-12-12T01:18:11,952 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5e44fdb7f11c8751e6f4cd080c058df0, NAME => 'testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:18:11,953 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => d7b81fdefbe8adc7f0ef823ba2a0e2ec, NAME => 'testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:18:11,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742019_1195 (size=68) 2024-12-12T01:18:11,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742019_1195 (size=68) 2024-12-12T01:18:11,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742019_1195 (size=68) 2024-12-12T01:18:11,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:18:11,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742020_1196 (size=68) 2024-12-12T01:18:11,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 5e44fdb7f11c8751e6f4cd080c058df0, disabling compactions & flushes 2024-12-12T01:18:11,989 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:11,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:11,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. after waiting 0 ms 2024-12-12T01:18:11,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 
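[Editor's note] The create request above (master.HMaster$4 and CreateTableProcedure pid=96) corresponds to an ordinary Admin-API table creation with a single column family 'cf' and one split key '1', which is why two regions (STARTKEY ''..'1' and '1'..'') are initialized. As a rough illustration only — assuming a reachable cluster and default client configuration, and noting that only the table and family names are taken from the log — the equivalent client-side call might look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();      // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                            // VERSIONS => '1' as shown in the descriptor above
              .build())
          .build();
      // One split key "1" yields two regions, ['', '1') and ['1', ''), matching the log.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}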
2024-12-12T01:18:11,989 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:11,989 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5e44fdb7f11c8751e6f4cd080c058df0: Waiting for close lock at 1733966291989Disabling compacts and flushes for region at 1733966291989Disabling writes for close at 1733966291989Writing region close event to WAL at 1733966291989Closed at 1733966291989 2024-12-12T01:18:11,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742020_1196 (size=68) 2024-12-12T01:18:11,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742020_1196 (size=68) 2024-12-12T01:18:11,991 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:18:11,991 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing d7b81fdefbe8adc7f0ef823ba2a0e2ec, disabling compactions & flushes 2024-12-12T01:18:11,991 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 2024-12-12T01:18:11,991 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 2024-12-12T01:18:11,991 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. after waiting 0 ms 2024-12-12T01:18:11,991 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 2024-12-12T01:18:11,991 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 
2024-12-12T01:18:11,991 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for d7b81fdefbe8adc7f0ef823ba2a0e2ec: Waiting for close lock at 1733966291991Disabling compacts and flushes for region at 1733966291991Disabling writes for close at 1733966291991Writing region close event to WAL at 1733966291991Closed at 1733966291991 2024-12-12T01:18:11,992 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:18:11,993 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733966291992"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966291992"}]},"ts":"1733966291992"} 2024-12-12T01:18:11,993 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733966291992"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966291992"}]},"ts":"1733966291992"} 2024-12-12T01:18:11,996 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-12T01:18:11,997 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:18:11,997 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966291997"}]},"ts":"1733966291997"} 2024-12-12T01:18:11,999 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-12T01:18:11,999 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:18:12,001 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:18:12,001 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:18:12,001 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:18:12,001 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:18:12,001 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:18:12,001 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:18:12,001 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:18:12,001 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:18:12,001 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:18:12,001 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:18:12,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5e44fdb7f11c8751e6f4cd080c058df0, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d7b81fdefbe8adc7f0ef823ba2a0e2ec, ASSIGN}] 2024-12-12T01:18:12,002 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d7b81fdefbe8adc7f0ef823ba2a0e2ec, ASSIGN 2024-12-12T01:18:12,003 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5e44fdb7f11c8751e6f4cd080c058df0, ASSIGN 2024-12-12T01:18:12,003 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5e44fdb7f11c8751e6f4cd080c058df0, ASSIGN; state=OFFLINE, location=36236b0d7605,33883,1733966101479; forceNewPlan=false, retain=false 2024-12-12T01:18:12,003 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d7b81fdefbe8adc7f0ef823ba2a0e2ec, ASSIGN; state=OFFLINE, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:18:12,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-12T01:18:12,154 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
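[Editor's note] The ASSIGN subprocedures (pid=97 and pid=98) move the two new regions from OFFLINE to OPENING on two different region servers, and the balancer confirms the placement ("Reassigned 2 regions"). From a client, the resulting locations can be observed once assignment finishes; a minimal sketch, assuming the same table name and a default connection (all other details hypothetical):

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocations {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportFileSystemState"))) {
      // Looks the regions up in hbase:meta, i.e. the rows the PEWorkers update below.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}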
2024-12-12T01:18:12,154 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=d7b81fdefbe8adc7f0ef823ba2a0e2ec, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:18:12,154 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=5e44fdb7f11c8751e6f4cd080c058df0, regionState=OPENING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:18:12,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d7b81fdefbe8adc7f0ef823ba2a0e2ec, ASSIGN because future has completed 2024-12-12T01:18:12,156 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:18:12,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5e44fdb7f11c8751e6f4cd080c058df0, ASSIGN because future has completed 2024-12-12T01:18:12,157 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:18:12,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-12T01:18:12,312 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 2024-12-12T01:18:12,312 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:12,312 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => d7b81fdefbe8adc7f0ef823ba2a0e2ec, NAME => 'testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T01:18:12,312 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => 5e44fdb7f11c8751e6f4cd080c058df0, NAME => 'testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T01:18:12,312 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. service=AccessControlService 2024-12-12T01:18:12,312 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 
service=AccessControlService 2024-12-12T01:18:12,313 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:18:12,313 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:18:12,313 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,313 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,313 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:18:12,313 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:18:12,313 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,313 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,313 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,313 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,314 INFO [StoreOpener-5e44fdb7f11c8751e6f4cd080c058df0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,315 INFO [StoreOpener-d7b81fdefbe8adc7f0ef823ba2a0e2ec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,316 INFO [StoreOpener-5e44fdb7f11c8751e6f4cd080c058df0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e44fdb7f11c8751e6f4cd080c058df0 columnFamilyName cf 2024-12-12T01:18:12,316 INFO [StoreOpener-d7b81fdefbe8adc7f0ef823ba2a0e2ec-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d7b81fdefbe8adc7f0ef823ba2a0e2ec columnFamilyName cf 2024-12-12T01:18:12,316 DEBUG [StoreOpener-5e44fdb7f11c8751e6f4cd080c058df0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:18:12,316 DEBUG [StoreOpener-d7b81fdefbe8adc7f0ef823ba2a0e2ec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:18:12,317 INFO [StoreOpener-5e44fdb7f11c8751e6f4cd080c058df0-1 {}] regionserver.HStore(327): Store=5e44fdb7f11c8751e6f4cd080c058df0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:18:12,317 INFO [StoreOpener-d7b81fdefbe8adc7f0ef823ba2a0e2ec-1 {}] regionserver.HStore(327): Store=d7b81fdefbe8adc7f0ef823ba2a0e2ec/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:18:12,317 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,317 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,317 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,318 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,318 DEBUG 
[RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,318 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,318 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,318 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,319 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,319 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,321 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,321 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,323 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:18:12,323 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:18:12,324 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened d7b81fdefbe8adc7f0ef823ba2a0e2ec; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74725894, jitterRate=0.11350259184837341}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:18:12,324 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,324 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened 5e44fdb7f11c8751e6f4cd080c058df0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66400111, jitterRate=-0.010561242699623108}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:18:12,324 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,324 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for d7b81fdefbe8adc7f0ef823ba2a0e2ec: Running coprocessor pre-open hook at 1733966292313Writing region info on filesystem at 1733966292313Initializing all the Stores at 1733966292314 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966292314Cleaning up temporary data from old regions at 1733966292319 (+5 ms)Running coprocessor post-open hooks at 1733966292324 (+5 ms)Region opened successfully at 1733966292324 2024-12-12T01:18:12,324 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for 5e44fdb7f11c8751e6f4cd080c058df0: Running coprocessor pre-open hook at 1733966292313Writing region info on filesystem at 1733966292313Initializing all the Stores at 1733966292314 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966292314Cleaning up temporary data from old regions at 1733966292318 (+4 ms)Running coprocessor post-open hooks at 1733966292324 (+6 ms)Region opened successfully at 1733966292324 2024-12-12T01:18:12,325 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0., pid=100, masterSystemTime=1733966292309 2024-12-12T01:18:12,325 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec., pid=99, masterSystemTime=1733966292308 2024-12-12T01:18:12,327 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 2024-12-12T01:18:12,327 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 
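[Editor's note] The HStore and CompactionConfiguration entries above simply echo the per-family settings from the table descriptor (one version, 64 KB block size, no compression or encoding). Purely as an illustrative sketch — the verification step is not part of this log — those settings could be read back through the Admin API:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescribeTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc =
          admin.getDescriptor(TableName.valueOf("testtb-testExportFileSystemState"));
      for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) {
        // Prints the same family settings the region servers log when opening the stores.
        System.out.println(cf.getNameAsString()
            + " maxVersions=" + cf.getMaxVersions()
            + " blocksize=" + cf.getBlocksize());
      }
    }
  }
}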
2024-12-12T01:18:12,327 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=d7b81fdefbe8adc7f0ef823ba2a0e2ec, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:18:12,328 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:12,328 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:12,328 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=5e44fdb7f11c8751e6f4cd080c058df0, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:18:12,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:18:12,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:18:12,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-12-12T01:18:12,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec, server=36236b0d7605,38295,1733966101221 in 175 msec 2024-12-12T01:18:12,334 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-12-12T01:18:12,334 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0, server=36236b0d7605,33883,1733966101479 in 175 msec 2024-12-12T01:18:12,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d7b81fdefbe8adc7f0ef823ba2a0e2ec, ASSIGN in 332 msec 2024-12-12T01:18:12,336 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-12-12T01:18:12,336 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5e44fdb7f11c8751e6f4cd080c058df0, ASSIGN in 333 msec 2024-12-12T01:18:12,337 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:18:12,337 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966292337"}]},"ts":"1733966292337"} 2024-12-12T01:18:12,339 INFO 
[PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-12T01:18:12,340 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:18:12,340 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-12T01:18:12,344 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-12T01:18:12,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:12,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:12,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:12,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:12,348 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T01:18:12,348 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T01:18:12,349 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T01:18:12,349 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T01:18:12,350 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 422 msec 2024-12-12T01:18:12,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-12T01:18:12,555 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-12T01:18:12,555 DEBUG [Time-limited 
test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-12T01:18:12,556 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:18:12,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-12-12T01:18:12,561 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:18:12,561 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 2024-12-12T01:18:12,561 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-12T01:18:12,564 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T01:18:12,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966292564 (current time:1733966292564). 2024-12-12T01:18:12,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:18:12,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-12T01:18:12,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:18:12,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54441828, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:12,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:18:12,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:18:12,566 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:18:12,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:18:12,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:18:12,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d1ad07e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:12,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:18:12,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:18:12,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:12,568 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51606, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:18:12,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c58ebbc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:12,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:18:12,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:18:12,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:18:12,571 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42316, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:18:12,572 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 
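[Editor's note] The PermissionStorage and ZKPermissionWatcher entries further up record the acl entry written for the new table ("jenkins: RWXCA", i.e. read, write, exec, create, admin) and its propagation to every watcher. A hedged sketch of how such a grant is issued from a client, assuming the AccessController coprocessor is installed as it is on this secure test cluster; user and table names mirror the log, everything else is hypothetical:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // RWXCA on the whole table: family and qualifier are left null.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemState"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}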
2024-12-12T01:18:12,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:18:12,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:12,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:12,572 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:18:12,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@134a89d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:12,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:18:12,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:18:12,574 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:18:12,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:18:12,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:18:12,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7aaf775b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:12,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:18:12,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:18:12,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:12,575 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51618, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:18:12,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28c6e5e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:12,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:18:12,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:18:12,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:18:12,578 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42324, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:18:12,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:18:12,580 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 
2024-12-12T01:18:12,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:18:12,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:12,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:12,581 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:18:12,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-12T01:18:12,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
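[Editor's note] Once validation succeeds ("No existing snapshot, attempting snapshot..."), the master stores SnapshotProcedure pid=101 for a FLUSH-type snapshot and walks it through SNAPSHOT_PREPARE, SNAPSHOT_PRE_OPERATION and the later states shown below. From the client's point of view the whole sequence is driven by one blocking call; a minimal sketch, assuming the snapshot and table names from the log and a default connection:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure completes (or throws on failure/timeout).
      admin.snapshot("emptySnaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"));
    }
  }
}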
2024-12-12T01:18:12,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T01:18:12,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-12T01:18:12,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-12T01:18:12,584 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:18:12,585 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:18:12,587 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:18:12,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742021_1197 (size=170) 2024-12-12T01:18:12,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742021_1197 (size=170) 2024-12-12T01:18:12,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742021_1197 (size=170) 2024-12-12T01:18:12,597 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:18:12,598 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec}] 2024-12-12T01:18:12,599 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,599 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,695 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-12T01:18:12,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-12T01:18:12,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-12T01:18:12,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:12,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 2024-12-12T01:18:12,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for 5e44fdb7f11c8751e6f4cd080c058df0: 2024-12-12T01:18:12,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. for emptySnaptb0-testExportFileSystemState completed. 2024-12-12T01:18:12,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for d7b81fdefbe8adc7f0ef823ba2a0e2ec: 2024-12-12T01:18:12,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-12T01:18:12,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. for emptySnaptb0-testExportFileSystemState completed. 2024-12-12T01:18:12,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:18:12,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:18:12,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-12T01:18:12,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:18:12,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:18:12,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742023_1199 (size=71) 2024-12-12T01:18:12,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742023_1199 (size=71) 2024-12-12T01:18:12,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742022_1198 (size=71) 2024-12-12T01:18:12,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:12,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742022_1198 (size=71) 2024-12-12T01:18:12,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-12T01:18:12,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742023_1199 (size=71) 2024-12-12T01:18:12,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742022_1198 (size=71) 2024-12-12T01:18:12,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-12T01:18:12,762 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 
2024-12-12T01:18:12,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-12T01:18:12,762 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:12,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-12T01:18:12,763 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,763 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:12,764 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0 in 165 msec 2024-12-12T01:18:12,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=101 2024-12-12T01:18:12,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec in 166 msec 2024-12-12T01:18:12,766 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:18:12,766 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:18:12,767 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:18:12,767 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-12T01:18:12,768 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-12T01:18:12,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742024_1200 (size=552) 2024-12-12T01:18:12,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742024_1200 (size=552) 2024-12-12T01:18:12,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34547 is added to blk_1073742024_1200 (size=552) 2024-12-12T01:18:12,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-12T01:18:13,189 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:18:13,195 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:18:13,196 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-12T01:18:13,198 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:18:13,198 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-12T01:18:13,199 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 616 msec 2024-12-12T01:18:13,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-12T01:18:13,215 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-12T01:18:13,220 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='02eff0f0714b996b82abda757cb968ef8', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:18:13,220 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='1135166f72fcc1136f1731e0d1bdf707d', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:18:13,221 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 
'testtb-testExportFileSystemState', row='23dab0600166c3ffbe4530d5f5c9e0694', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:18:13,222 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='37e5bce0e531442c233445c73054e274d', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:18:13,222 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='4ffcb7d2ba27c4405e272bbe59949dff7', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:18:13,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33883 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:18:13,227 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:18:13,228 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-12T01:18:13,231 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-12T01:18:13,231 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:13,231 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:18:13,234 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-12T01:18:13,238 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-12T01:18:13,244 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-12T01:18:13,247 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T01:18:13,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966293247 (current time:1733966293247). 
2024-12-12T01:18:13,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:18:13,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-12T01:18:13,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:18:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@416830c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:18:13,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:18:13,249 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:18:13,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:18:13,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:18:13,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@98b8e24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:13,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:18:13,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:18:13,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:13,250 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37636, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:18:13,251 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dfe78d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:18:13,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:18:13,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:18:13,253 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46578, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:18:13,255 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:18:13,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:18:13,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:13,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:13,255 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:18:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57a9b629, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:18:13,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:18:13,257 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:18:13,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:18:13,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:18:13,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17fc9d9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:13,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:18:13,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:18:13,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:13,259 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37648, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:18:13,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dcc70d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:13,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:18:13,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:18:13,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:18:13,262 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46586, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-12T01:18:13,264 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:18:13,266 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:18:13,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:18:13,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:13,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:13,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: 
RWXCA] 2024-12-12T01:18:13,266 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:18:13,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:18:13,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T01:18:13,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-12T01:18:13,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-12T01:18:13,270 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:18:13,271 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:18:13,273 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:18:13,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742025_1201 (size=165) 2024-12-12T01:18:13,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742025_1201 (size=165) 2024-12-12T01:18:13,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742025_1201 (size=165) 2024-12-12T01:18:13,281 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:18:13,281 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec}] 2024-12-12T01:18:13,283 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:13,283 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:13,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-12T01:18:13,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-12T01:18:13,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-12T01:18:13,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:13,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 2024-12-12T01:18:13,435 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing 5e44fdb7f11c8751e6f4cd080c058df0 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-12T01:18:13,435 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing d7b81fdefbe8adc7f0ef823ba2a0e2ec 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-12T01:18:13,452 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/.tmp/cf/43af9e2dbcd246d7a12a1b8bfc59e2fc is 71, key is 0012b7f2addfe2f903f4a2b2e03ed56a/cf:q/1733966293225/Put/seqid=0 2024-12-12T01:18:13,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742026_1202 (size=5354) 2024-12-12T01:18:13,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742026_1202 (size=5354) 2024-12-12T01:18:13,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742026_1202 (size=5354) 2024-12-12T01:18:13,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/.tmp/cf/439b082940bf40a795442b357933d9c0 is 71, key is 10c703481beb50bfb3132d425ee0c86a/cf:q/1733966293227/Put/seqid=0 2024-12-12T01:18:13,462 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, 
pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/.tmp/cf/43af9e2dbcd246d7a12a1b8bfc59e2fc 2024-12-12T01:18:13,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742027_1203 (size=8256) 2024-12-12T01:18:13,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742027_1203 (size=8256) 2024-12-12T01:18:13,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742027_1203 (size=8256) 2024-12-12T01:18:13,467 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/.tmp/cf/439b082940bf40a795442b357933d9c0 2024-12-12T01:18:13,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/.tmp/cf/43af9e2dbcd246d7a12a1b8bfc59e2fc as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/cf/43af9e2dbcd246d7a12a1b8bfc59e2fc 2024-12-12T01:18:13,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/.tmp/cf/439b082940bf40a795442b357933d9c0 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/cf/439b082940bf40a795442b357933d9c0 2024-12-12T01:18:13,479 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/cf/439b082940bf40a795442b357933d9c0, entries=46, sequenceid=6, filesize=8.1 K 2024-12-12T01:18:13,479 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/cf/43af9e2dbcd246d7a12a1b8bfc59e2fc, entries=4, sequenceid=6, filesize=5.2 K 2024-12-12T01:18:13,480 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 
d7b81fdefbe8adc7f0ef823ba2a0e2ec in 45ms, sequenceid=6, compaction requested=false 2024-12-12T01:18:13,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-12T01:18:13,480 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 5e44fdb7f11c8751e6f4cd080c058df0 in 45ms, sequenceid=6, compaction requested=false 2024-12-12T01:18:13,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-12T01:18:13,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for d7b81fdefbe8adc7f0ef823ba2a0e2ec: 2024-12-12T01:18:13,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. for snaptb0-testExportFileSystemState completed. 2024-12-12T01:18:13,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for 5e44fdb7f11c8751e6f4cd080c058df0: 2024-12-12T01:18:13,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. for snaptb0-testExportFileSystemState completed. 2024-12-12T01:18:13,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-12T01:18:13,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:18:13,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-12T01:18:13,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/cf/439b082940bf40a795442b357933d9c0] hfiles 2024-12-12T01:18:13,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/cf/439b082940bf40a795442b357933d9c0 for snapshot=snaptb0-testExportFileSystemState 2024-12-12T01:18:13,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:18:13,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/cf/43af9e2dbcd246d7a12a1b8bfc59e2fc] hfiles 2024-12-12T01:18:13,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/cf/43af9e2dbcd246d7a12a1b8bfc59e2fc for snapshot=snaptb0-testExportFileSystemState 2024-12-12T01:18:13,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742028_1204 (size=110) 2024-12-12T01:18:13,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742029_1205 (size=110) 2024-12-12T01:18:13,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742028_1204 (size=110) 2024-12-12T01:18:13,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742029_1205 (size=110) 2024-12-12T01:18:13,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742029_1205 (size=110) 2024-12-12T01:18:13,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742028_1204 (size=110) 2024-12-12T01:18:13,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 
2024-12-12T01:18:13,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-12T01:18:13,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-12T01:18:13,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:13,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-12T01:18:13,490 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:13,491 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:13,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-12T01:18:13,491 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:13,491 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:13,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec in 211 msec 2024-12-12T01:18:13,494 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=105, resume processing ppid=104 2024-12-12T01:18:13,494 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0 in 211 msec 2024-12-12T01:18:13,494 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:18:13,495 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:18:13,496 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:18:13,496 DEBUG 
[PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-12T01:18:13,497 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-12T01:18:13,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742030_1206 (size=630) 2024-12-12T01:18:13,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742030_1206 (size=630) 2024-12-12T01:18:13,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742030_1206 (size=630) 2024-12-12T01:18:13,525 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:18:13,531 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:18:13,531 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-12T01:18:13,533 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:18:13,533 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-12T01:18:13,535 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 266 msec 2024-12-12T01:18:13,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-12T01:18:13,585 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-12T01:18:13,586 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966293585 2024-12-12T01:18:13,586 INFO 
[Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33353, tgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966293585, rawTgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966293585, srcFsUri=hdfs://localhost:33353, srcDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:18:13,633 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33353, inputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:18:13,633 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966293585, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966293585/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-12T01:18:13,636 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-12T01:18:13,641 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966293585/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-12T01:18:13,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742031_1207 (size=165) 2024-12-12T01:18:13,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742031_1207 (size=165) 2024-12-12T01:18:13,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742031_1207 (size=165) 2024-12-12T01:18:13,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742032_1208 (size=630) 2024-12-12T01:18:13,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742032_1208 (size=630) 2024-12-12T01:18:13,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742032_1208 (size=630) 2024-12-12T01:18:13,672 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:13,672 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:13,672 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:14,877 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-11493423824671568801.jar 2024-12-12T01:18:14,877 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:14,878 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:14,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-2229389211889364249.jar 2024-12-12T01:18:14,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:14,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:14,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:14,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:14,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:14,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:14,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T01:18:14,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T01:18:14,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T01:18:14,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T01:18:14,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T01:18:14,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T01:18:14,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T01:18:14,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T01:18:14,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T01:18:14,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T01:18:14,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T01:18:14,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:18:14,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:18:14,963 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:18:14,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:18:14,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:18:14,964 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:18:14,964 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:18:15,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742033_1209 (size=131440) 2024-12-12T01:18:15,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742033_1209 (size=131440) 2024-12-12T01:18:15,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742033_1209 (size=131440) 2024-12-12T01:18:15,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742034_1210 (size=4188619) 2024-12-12T01:18:15,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742034_1210 (size=4188619) 2024-12-12T01:18:15,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742034_1210 (size=4188619) 2024-12-12T01:18:15,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742035_1211 (size=1323991) 2024-12-12T01:18:15,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742035_1211 (size=1323991) 2024-12-12T01:18:15,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742035_1211 (size=1323991) 2024-12-12T01:18:15,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742036_1212 (size=903541) 2024-12-12T01:18:15,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742036_1212 (size=903541) 2024-12-12T01:18:15,148 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742036_1212 (size=903541) 2024-12-12T01:18:15,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742037_1213 (size=8360360) 2024-12-12T01:18:15,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742037_1213 (size=8360360) 2024-12-12T01:18:15,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742037_1213 (size=8360360) 2024-12-12T01:18:15,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742038_1214 (size=1877034) 2024-12-12T01:18:15,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742038_1214 (size=1877034) 2024-12-12T01:18:15,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742038_1214 (size=1877034) 2024-12-12T01:18:15,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742039_1215 (size=77835) 2024-12-12T01:18:15,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742039_1215 (size=77835) 2024-12-12T01:18:15,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742039_1215 (size=77835) 2024-12-12T01:18:15,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742040_1216 (size=30949) 2024-12-12T01:18:15,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742040_1216 (size=30949) 2024-12-12T01:18:15,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742040_1216 (size=30949) 2024-12-12T01:18:15,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742041_1217 (size=1597213) 2024-12-12T01:18:15,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742041_1217 (size=1597213) 2024-12-12T01:18:15,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742041_1217 (size=1597213) 2024-12-12T01:18:15,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742042_1218 (size=4695811) 2024-12-12T01:18:15,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742042_1218 (size=4695811) 2024-12-12T01:18:15,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742042_1218 (size=4695811) 2024-12-12T01:18:15,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742043_1219 (size=232957) 2024-12-12T01:18:15,320 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742043_1219 (size=232957) 2024-12-12T01:18:15,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742043_1219 (size=232957) 2024-12-12T01:18:15,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742044_1220 (size=127628) 2024-12-12T01:18:15,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742044_1220 (size=127628) 2024-12-12T01:18:15,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742044_1220 (size=127628) 2024-12-12T01:18:15,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742045_1221 (size=20406) 2024-12-12T01:18:15,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742045_1221 (size=20406) 2024-12-12T01:18:15,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742045_1221 (size=20406) 2024-12-12T01:18:15,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742046_1222 (size=5175431) 2024-12-12T01:18:15,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742046_1222 (size=5175431) 2024-12-12T01:18:15,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742046_1222 (size=5175431) 2024-12-12T01:18:15,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742047_1223 (size=217634) 2024-12-12T01:18:15,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742047_1223 (size=217634) 2024-12-12T01:18:15,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742047_1223 (size=217634) 2024-12-12T01:18:15,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742048_1224 (size=1832290) 2024-12-12T01:18:15,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742048_1224 (size=1832290) 2024-12-12T01:18:15,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742048_1224 (size=1832290) 2024-12-12T01:18:15,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742049_1225 (size=322274) 2024-12-12T01:18:15,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742049_1225 (size=322274) 2024-12-12T01:18:15,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742049_1225 (size=322274) 2024-12-12T01:18:15,455 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742050_1226 (size=6426224) 2024-12-12T01:18:15,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742050_1226 (size=6426224) 2024-12-12T01:18:15,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742050_1226 (size=6426224) 2024-12-12T01:18:15,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742051_1227 (size=503880) 2024-12-12T01:18:15,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742051_1227 (size=503880) 2024-12-12T01:18:15,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742051_1227 (size=503880) 2024-12-12T01:18:15,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742052_1228 (size=443172) 2024-12-12T01:18:15,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742052_1228 (size=443172) 2024-12-12T01:18:15,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742052_1228 (size=443172) 2024-12-12T01:18:15,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742053_1229 (size=29229) 2024-12-12T01:18:15,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742053_1229 (size=29229) 2024-12-12T01:18:15,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742053_1229 (size=29229) 2024-12-12T01:18:15,668 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0003_000001 (auth:SIMPLE) from 127.0.0.1:40772 2024-12-12T01:18:15,680 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0003/container_1733966109028_0003_01_000001/launch_container.sh] 2024-12-12T01:18:15,680 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0003/container_1733966109028_0003_01_000001/container_tokens] 2024-12-12T01:18:15,680 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0003/container_1733966109028_0003_01_000001/sysfs] 
2024-12-12T01:18:15,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742054_1230 (size=24096) 2024-12-12T01:18:15,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742054_1230 (size=24096) 2024-12-12T01:18:15,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742054_1230 (size=24096) 2024-12-12T01:18:15,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742055_1231 (size=111872) 2024-12-12T01:18:15,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742055_1231 (size=111872) 2024-12-12T01:18:15,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742055_1231 (size=111872) 2024-12-12T01:18:15,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742056_1232 (size=45609) 2024-12-12T01:18:15,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742056_1232 (size=45609) 2024-12-12T01:18:15,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742056_1232 (size=45609) 2024-12-12T01:18:16,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742057_1233 (size=136454) 2024-12-12T01:18:16,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742057_1233 (size=136454) 2024-12-12T01:18:16,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742057_1233 (size=136454) 2024-12-12T01:18:16,356 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-12T01:18:16,359 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-12T01:18:16,361 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-12T01:18:16,361 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-12T01:18:16,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742058_1234 (size=447) 2024-12-12T01:18:16,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742058_1234 (size=447) 2024-12-12T01:18:16,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742058_1234 (size=447) 2024-12-12T01:18:16,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742059_1235 (size=21) 2024-12-12T01:18:16,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742059_1235 (size=21) 2024-12-12T01:18:16,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742059_1235 (size=21) 2024-12-12T01:18:16,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742060_1236 (size=304008) 2024-12-12T01:18:16,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742060_1236 (size=304008) 2024-12-12T01:18:16,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742060_1236 (size=304008) 2024-12-12T01:18:16,435 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:18:16,435 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T01:18:16,514 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0004_000001 (auth:SIMPLE) from 127.0.0.1:39462 2024-12-12T01:18:16,683 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:18:20,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-12T01:18:20,712 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-12T01:18:20,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-12T01:18:20,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-12T01:18:23,125 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0004_000001 (auth:SIMPLE) from 127.0.0.1:38054 2024-12-12T01:18:23,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742061_1237 (size=349706) 2024-12-12T01:18:23,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742061_1237 (size=349706) 2024-12-12T01:18:23,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742061_1237 (size=349706) 2024-12-12T01:18:25,771 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0004_000001 (auth:SIMPLE) from 127.0.0.1:34568 2024-12-12T01:18:25,771 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0004_000001 (auth:SIMPLE) from 127.0.0.1:51002 2024-12-12T01:18:26,215 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:18:28,909 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-12T01:18:32,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742062_1238 (size=8256) 2024-12-12T01:18:32,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742062_1238 (size=8256) 2024-12-12T01:18:32,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742062_1238 (size=8256) 2024-12-12T01:18:32,200 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0004/container_1733966109028_0004_01_000002/launch_container.sh] 2024-12-12T01:18:32,200 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0004/container_1733966109028_0004_01_000002/container_tokens] 2024-12-12T01:18:32,200 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0004/container_1733966109028_0004_01_000002/sysfs] 2024-12-12T01:18:33,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742064_1240 (size=5354) 2024-12-12T01:18:33,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742064_1240 (size=5354) 2024-12-12T01:18:33,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742064_1240 (size=5354) 2024-12-12T01:18:33,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742063_1239 (size=22168) 2024-12-12T01:18:33,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742063_1239 (size=22168) 2024-12-12T01:18:33,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742063_1239 (size=22168) 2024-12-12T01:18:33,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742065_1241 (size=466) 2024-12-12T01:18:33,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742065_1241 (size=466) 2024-12-12T01:18:33,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742065_1241 (size=466) 2024-12-12T01:18:33,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742066_1242 (size=22168) 
2024-12-12T01:18:33,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742066_1242 (size=22168) 2024-12-12T01:18:33,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742066_1242 (size=22168) 2024-12-12T01:18:33,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742067_1243 (size=349706) 2024-12-12T01:18:33,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742067_1243 (size=349706) 2024-12-12T01:18:33,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742067_1243 (size=349706) 2024-12-12T01:18:33,592 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0004/container_1733966109028_0004_01_000003/launch_container.sh] 2024-12-12T01:18:33,592 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0004/container_1733966109028_0004_01_000003/container_tokens] 2024-12-12T01:18:33,592 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0004/container_1733966109028_0004_01_000003/sysfs] 2024-12-12T01:18:33,593 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0004_000001 (auth:SIMPLE) from 127.0.0.1:54388 2024-12-12T01:18:34,738 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-12T01:18:34,739 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-12T01:18:34,745 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-12T01:18:34,745 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-12T01:18:34,746 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-12T01:18:34,746 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-12T01:18:34,746 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-12T01:18:34,746 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-12T01:18:34,746 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966293585/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966293585/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-12T01:18:34,747 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966293585/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-12T01:18:34,747 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966293585/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-12T01:18:34,753 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-12T01:18:34,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-12T01:18:34,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-12T01:18:34,757 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966314757"}]},"ts":"1733966314757"} 2024-12-12T01:18:34,759 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-12T01:18:34,759 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-12T01:18:34,759 INFO 
[PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-12T01:18:34,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5e44fdb7f11c8751e6f4cd080c058df0, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d7b81fdefbe8adc7f0ef823ba2a0e2ec, UNASSIGN}] 2024-12-12T01:18:34,762 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d7b81fdefbe8adc7f0ef823ba2a0e2ec, UNASSIGN 2024-12-12T01:18:34,762 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5e44fdb7f11c8751e6f4cd080c058df0, UNASSIGN 2024-12-12T01:18:34,763 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=5e44fdb7f11c8751e6f4cd080c058df0, regionState=CLOSING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:18:34,763 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=d7b81fdefbe8adc7f0ef823ba2a0e2ec, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:18:34,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5e44fdb7f11c8751e6f4cd080c058df0, UNASSIGN because future has completed 2024-12-12T01:18:34,766 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d7b81fdefbe8adc7f0ef823ba2a0e2ec, UNASSIGN because future has completed 2024-12-12T01:18:34,768 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:18:34,768 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:18:34,768 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:18:34,768 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:18:34,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-12T01:18:34,921 INFO 
[RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:34,921 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:34,921 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:18:34,921 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:18:34,922 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing 5e44fdb7f11c8751e6f4cd080c058df0, disabling compactions & flushes 2024-12-12T01:18:34,922 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing d7b81fdefbe8adc7f0ef823ba2a0e2ec, disabling compactions & flushes 2024-12-12T01:18:34,922 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:34,922 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 2024-12-12T01:18:34,922 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:34,922 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 2024-12-12T01:18:34,922 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. after waiting 0 ms 2024-12-12T01:18:34,922 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. after waiting 0 ms 2024-12-12T01:18:34,922 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:34,922 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 
2024-12-12T01:18:34,926 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:18:34,926 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:18:34,927 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:18:34,927 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:18:34,927 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0. 2024-12-12T01:18:34,927 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec. 2024-12-12T01:18:34,927 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for 5e44fdb7f11c8751e6f4cd080c058df0: Waiting for close lock at 1733966314921Running coprocessor pre-close hooks at 1733966314921Disabling compacts and flushes for region at 1733966314922 (+1 ms)Disabling writes for close at 1733966314922Writing region close event to WAL at 1733966314922Running coprocessor post-close hooks at 1733966314927 (+5 ms)Closed at 1733966314927 2024-12-12T01:18:34,927 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for d7b81fdefbe8adc7f0ef823ba2a0e2ec: Waiting for close lock at 1733966314922Running coprocessor pre-close hooks at 1733966314922Disabling compacts and flushes for region at 1733966314922Disabling writes for close at 1733966314922Writing region close event to WAL at 1733966314923 (+1 ms)Running coprocessor post-close hooks at 1733966314927 (+4 ms)Closed at 1733966314927 2024-12-12T01:18:34,929 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed 5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:34,930 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=5e44fdb7f11c8751e6f4cd080c058df0, regionState=CLOSED 2024-12-12T01:18:34,930 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:34,930 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=d7b81fdefbe8adc7f0ef823ba2a0e2ec, regionState=CLOSED 2024-12-12T01:18:34,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:18:34,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:18:34,934 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=109 2024-12-12T01:18:34,934 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure 5e44fdb7f11c8751e6f4cd080c058df0, server=36236b0d7605,33883,1733966101479 in 165 msec 2024-12-12T01:18:34,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=110 2024-12-12T01:18:34,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure d7b81fdefbe8adc7f0ef823ba2a0e2ec, server=36236b0d7605,38295,1733966101221 in 165 msec 2024-12-12T01:18:34,935 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5e44fdb7f11c8751e6f4cd080c058df0, UNASSIGN in 173 msec 2024-12-12T01:18:34,936 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=110, resume processing ppid=108 2024-12-12T01:18:34,936 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d7b81fdefbe8adc7f0ef823ba2a0e2ec, UNASSIGN in 174 msec 2024-12-12T01:18:34,938 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-12T01:18:34,938 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 178 msec 2024-12-12T01:18:34,939 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966314939"}]},"ts":"1733966314939"} 2024-12-12T01:18:34,941 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-12T01:18:34,941 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-12T01:18:34,943 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 189 msec 2024-12-12T01:18:35,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-12T01:18:35,075 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-12T01:18:35,076 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-12T01:18:35,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-12T01:18:35,077 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-12T01:18:35,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-12T01:18:35,078 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-12T01:18:35,081 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-12T01:18:35,082 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:35,082 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:35,084 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/recovered.edits] 2024-12-12T01:18:35,084 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/recovered.edits] 2024-12-12T01:18:35,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T01:18:35,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T01:18:35,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T01:18:35,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T01:18:35,085 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-12T01:18:35,085 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-12T01:18:35,086 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-12T01:18:35,086 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-12T01:18:35,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T01:18:35,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T01:18:35,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T01:18:35,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:35,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-12T01:18:35,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:35,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:35,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:35,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-12T01:18:35,090 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/cf/439b082940bf40a795442b357933d9c0 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/cf/439b082940bf40a795442b357933d9c0 2024-12-12T01:18:35,091 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/cf/43af9e2dbcd246d7a12a1b8bfc59e2fc to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/cf/43af9e2dbcd246d7a12a1b8bfc59e2fc 2024-12-12T01:18:35,093 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec/recovered.edits/9.seqid 2024-12-12T01:18:35,094 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0/recovered.edits/9.seqid 2024-12-12T01:18:35,094 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/d7b81fdefbe8adc7f0ef823ba2a0e2ec 2024-12-12T01:18:35,094 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemState/5e44fdb7f11c8751e6f4cd080c058df0 2024-12-12T01:18:35,094 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-12T01:18:35,096 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-12T01:18:35,100 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-12T01:18:35,102 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-12T01:18:35,103 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-12T01:18:35,104 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemState' from region states. 
2024-12-12T01:18:35,104 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966315104"}]},"ts":"9223372036854775807"} 2024-12-12T01:18:35,104 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966315104"}]},"ts":"9223372036854775807"} 2024-12-12T01:18:35,106 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-12T01:18:35,106 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5e44fdb7f11c8751e6f4cd080c058df0, NAME => 'testtb-testExportFileSystemState,,1733966291926.5e44fdb7f11c8751e6f4cd080c058df0.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => d7b81fdefbe8adc7f0ef823ba2a0e2ec, NAME => 'testtb-testExportFileSystemState,1,1733966291926.d7b81fdefbe8adc7f0ef823ba2a0e2ec.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T01:18:35,106 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemState' as deleted. 2024-12-12T01:18:35,106 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966315106"}]},"ts":"9223372036854775807"} 2024-12-12T01:18:35,108 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-12T01:18:35,108 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-12T01:18:35,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 32 msec 2024-12-12T01:18:35,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-12T01:18:35,195 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-12T01:18:35,195 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-12T01:18:35,202 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-12T01:18:35,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-12T01:18:35,205 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-12T01:18:35,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-12T01:18:35,230 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=797 (was 792) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1778943433_1 at /127.0.0.1:57780 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 27050) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:58694 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44585 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43095 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:44585 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:54410 [Waiting for operation #5] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3707 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:57822 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=788 (was 790), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=746 (was 702) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 14) - ProcessCount LEAK? 
-, AvailableMemoryMB=6140 (was 6657) 2024-12-12T01:18:35,230 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-12T01:18:35,248 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=797, OpenFileDescriptor=788, MaxFileDescriptor=1048576, SystemLoadAverage=746, ProcessCount=17, AvailableMemoryMB=6140 2024-12-12T01:18:35,248 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-12T01:18:35,250 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:18:35,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-12T01:18:35,252 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:18:35,253 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:18:35,253 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-12T01:18:35,253 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:18:35,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-12T01:18:35,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742068_1244 (size=404) 2024-12-12T01:18:35,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742068_1244 (size=404) 2024-12-12T01:18:35,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742068_1244 (size=404) 2024-12-12T01:18:35,262 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7c8ee88d2e52c9d5777852576a25ebbf, NAME => 'testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:18:35,263 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5a9d347d075abc43e7359e9246fa23f7, NAME => 'testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:18:35,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742069_1245 (size=65) 2024-12-12T01:18:35,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742069_1245 (size=65) 2024-12-12T01:18:35,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742069_1245 (size=65) 2024-12-12T01:18:35,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742070_1246 (size=65) 2024-12-12T01:18:35,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742070_1246 (size=65) 2024-12-12T01:18:35,279 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:18:35,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742070_1246 (size=65) 2024-12-12T01:18:35,279 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 7c8ee88d2e52c9d5777852576a25ebbf, disabling compactions & flushes 2024-12-12T01:18:35,279 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 2024-12-12T01:18:35,279 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 2024-12-12T01:18:35,279 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 
after waiting 0 ms 2024-12-12T01:18:35,279 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 2024-12-12T01:18:35,279 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:18:35,279 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 2024-12-12T01:18:35,279 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 5a9d347d075abc43e7359e9246fa23f7, disabling compactions & flushes 2024-12-12T01:18:35,279 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7c8ee88d2e52c9d5777852576a25ebbf: Waiting for close lock at 1733966315279Disabling compacts and flushes for region at 1733966315279Disabling writes for close at 1733966315279Writing region close event to WAL at 1733966315279Closed at 1733966315279 2024-12-12T01:18:35,279 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 2024-12-12T01:18:35,279 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 2024-12-12T01:18:35,279 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. after waiting 0 ms 2024-12-12T01:18:35,279 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 2024-12-12T01:18:35,279 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 
2024-12-12T01:18:35,279 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5a9d347d075abc43e7359e9246fa23f7: Waiting for close lock at 1733966315279Disabling compacts and flushes for region at 1733966315279Disabling writes for close at 1733966315279Writing region close event to WAL at 1733966315279Closed at 1733966315279 2024-12-12T01:18:35,281 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:18:35,281 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733966315281"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966315281"}]},"ts":"1733966315281"} 2024-12-12T01:18:35,282 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733966315281"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966315281"}]},"ts":"1733966315281"} 2024-12-12T01:18:35,284 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-12T01:18:35,285 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:18:35,285 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966315285"}]},"ts":"1733966315285"} 2024-12-12T01:18:35,287 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-12T01:18:35,288 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:18:35,289 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:18:35,289 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:18:35,289 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:18:35,289 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:18:35,289 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:18:35,289 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:18:35,289 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:18:35,289 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:18:35,289 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:18:35,289 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:18:35,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7c8ee88d2e52c9d5777852576a25ebbf, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5a9d347d075abc43e7359e9246fa23f7, ASSIGN}] 2024-12-12T01:18:35,290 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5a9d347d075abc43e7359e9246fa23f7, ASSIGN 2024-12-12T01:18:35,291 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7c8ee88d2e52c9d5777852576a25ebbf, ASSIGN 2024-12-12T01:18:35,291 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5a9d347d075abc43e7359e9246fa23f7, ASSIGN; state=OFFLINE, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:18:35,291 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7c8ee88d2e52c9d5777852576a25ebbf, ASSIGN; state=OFFLINE, location=36236b0d7605,33883,1733966101479; forceNewPlan=false, retain=false 2024-12-12T01:18:35,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-12T01:18:35,442 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-12T01:18:35,442 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=7c8ee88d2e52c9d5777852576a25ebbf, regionState=OPENING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:18:35,442 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=5a9d347d075abc43e7359e9246fa23f7, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:18:35,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7c8ee88d2e52c9d5777852576a25ebbf, ASSIGN because future has completed 2024-12-12T01:18:35,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:18:35,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5a9d347d075abc43e7359e9246fa23f7, ASSIGN because future has completed 2024-12-12T01:18:35,450 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5a9d347d075abc43e7359e9246fa23f7, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:18:35,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-12T01:18:35,605 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 2024-12-12T01:18:35,605 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 7c8ee88d2e52c9d5777852576a25ebbf, NAME => 'testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T01:18:35,606 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. service=AccessControlService 2024-12-12T01:18:35,606 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:18:35,606 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 
2024-12-12T01:18:35,606 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:35,606 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:18:35,606 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => 5a9d347d075abc43e7359e9246fa23f7, NAME => 'testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T01:18:35,606 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:35,606 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:35,607 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. service=AccessControlService 2024-12-12T01:18:35,607 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:18:35,607 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:35,607 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:18:35,607 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:35,607 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:35,608 INFO [StoreOpener-7c8ee88d2e52c9d5777852576a25ebbf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:35,609 INFO [StoreOpener-5a9d347d075abc43e7359e9246fa23f7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:35,609 INFO [StoreOpener-7c8ee88d2e52c9d5777852576a25ebbf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c8ee88d2e52c9d5777852576a25ebbf columnFamilyName cf 2024-12-12T01:18:35,609 DEBUG [StoreOpener-7c8ee88d2e52c9d5777852576a25ebbf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:18:35,610 INFO [StoreOpener-5a9d347d075abc43e7359e9246fa23f7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
5a9d347d075abc43e7359e9246fa23f7 columnFamilyName cf 2024-12-12T01:18:35,610 DEBUG [StoreOpener-5a9d347d075abc43e7359e9246fa23f7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:18:35,610 INFO [StoreOpener-7c8ee88d2e52c9d5777852576a25ebbf-1 {}] regionserver.HStore(327): Store=7c8ee88d2e52c9d5777852576a25ebbf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:18:35,610 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:35,610 INFO [StoreOpener-5a9d347d075abc43e7359e9246fa23f7-1 {}] regionserver.HStore(327): Store=5a9d347d075abc43e7359e9246fa23f7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:18:35,610 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:35,611 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:35,611 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:35,611 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:35,611 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:35,612 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:35,612 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:35,612 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:35,612 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:35,613 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 
{event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:35,613 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:35,616 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:18:35,616 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:18:35,616 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 7c8ee88d2e52c9d5777852576a25ebbf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72175770, jitterRate=0.07550278306007385}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:18:35,616 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened 5a9d347d075abc43e7359e9246fa23f7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70809481, jitterRate=0.05514349043369293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:18:35,616 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:35,616 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:35,617 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for 5a9d347d075abc43e7359e9246fa23f7: Running coprocessor pre-open hook at 1733966315607Writing region info on filesystem at 1733966315607Initializing all the Stores at 1733966315608 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966315608Cleaning up temporary data from old regions at 1733966315612 (+4 ms)Running coprocessor post-open hooks at 1733966315616 (+4 ms)Region opened successfully at 1733966315617 (+1 ms) 2024-12-12T01:18:35,617 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 7c8ee88d2e52c9d5777852576a25ebbf: Running coprocessor pre-open hook at 1733966315607Writing region info on filesystem at 1733966315607Initializing all 
the Stores at 1733966315607Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966315607Cleaning up temporary data from old regions at 1733966315612 (+5 ms)Running coprocessor post-open hooks at 1733966315616 (+4 ms)Region opened successfully at 1733966315617 (+1 ms) 2024-12-12T01:18:35,617 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf., pid=117, masterSystemTime=1733966315601 2024-12-12T01:18:35,617 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7., pid=118, masterSystemTime=1733966315603 2024-12-12T01:18:35,619 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 2024-12-12T01:18:35,619 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 2024-12-12T01:18:35,620 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=5a9d347d075abc43e7359e9246fa23f7, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:18:35,620 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 2024-12-12T01:18:35,620 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 
2024-12-12T01:18:35,621 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=7c8ee88d2e52c9d5777852576a25ebbf, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:18:35,622 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5a9d347d075abc43e7359e9246fa23f7, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:18:35,622 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:18:35,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=116 2024-12-12T01:18:35,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure 5a9d347d075abc43e7359e9246fa23f7, server=36236b0d7605,38295,1733966101221 in 172 msec 2024-12-12T01:18:35,625 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=115 2024-12-12T01:18:35,625 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf, server=36236b0d7605,33883,1733966101479 in 174 msec 2024-12-12T01:18:35,626 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5a9d347d075abc43e7359e9246fa23f7, ASSIGN in 334 msec 2024-12-12T01:18:35,627 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=114 2024-12-12T01:18:35,627 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7c8ee88d2e52c9d5777852576a25ebbf, ASSIGN in 335 msec 2024-12-12T01:18:35,628 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:18:35,628 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966315628"}]},"ts":"1733966315628"} 2024-12-12T01:18:35,629 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-12T01:18:35,630 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:18:35,630 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-12T01:18:35,633 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 
2024-12-12T01:18:35,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:35,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:35,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:35,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:18:35,638 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-12T01:18:35,638 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-12T01:18:35,638 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-12T01:18:35,638 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-12T01:18:35,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 388 msec 2024-12-12T01:18:35,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-12T01:18:35,884 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-12T01:18:35,884 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-12T01:18:35,885 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:18:35,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-12T01:18:35,888 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:18:35,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 
2024-12-12T01:18:35,888 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-12T01:18:35,891 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-12T01:18:35,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966315891 (current time:1733966315891). 2024-12-12T01:18:35,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:18:35,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-12T01:18:35,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:18:35,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76e97205, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:35,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:18:35,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:18:35,893 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:18:35,893 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:18:35,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:18:35,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f9dbae7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:35,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:18:35,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:18:35,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:35,895 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:35496, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:18:35,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50fd4e64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:35,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:18:35,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:18:35,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:18:35,898 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59812, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:18:35,899 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:18:35,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:18:35,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:35,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:35,899 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:18:35,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bf301e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:35,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:18:35,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:18:35,901 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:18:35,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:18:35,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:18:35,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2271b1b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:35,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:18:35,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:18:35,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:35,902 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35516, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:18:35,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5498668e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:35,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:18:35,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:18:35,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:18:35,904 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59816, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-12T01:18:35,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:18:35,907 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083. 2024-12-12T01:18:35,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:18:35,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:35,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:35,907 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:18:35,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-12T01:18:35,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:18:35,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-12T01:18:35,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-12T01:18:35,910 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:18:35,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-12T01:18:35,911 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:18:35,913 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:18:35,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742071_1247 (size=161) 2024-12-12T01:18:35,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742071_1247 (size=161) 2024-12-12T01:18:35,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742071_1247 (size=161) 2024-12-12T01:18:35,927 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:18:35,927 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a9d347d075abc43e7359e9246fa23f7}] 2024-12-12T01:18:35,928 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, 
ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:35,928 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:36,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-12T01:18:36,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-12T01:18:36,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 2024-12-12T01:18:36,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-12T01:18:36,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 2024-12-12T01:18:36,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 7c8ee88d2e52c9d5777852576a25ebbf: 2024-12-12T01:18:36,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. for emptySnaptb0-testConsecutiveExports completed. 2024-12-12T01:18:36,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for 5a9d347d075abc43e7359e9246fa23f7: 2024-12-12T01:18:36,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. for emptySnaptb0-testConsecutiveExports completed. 2024-12-12T01:18:36,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-12T01:18:36,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:18:36,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-12T01:18:36,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:18:36,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:18:36,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:18:36,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742072_1248 (size=68) 2024-12-12T01:18:36,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742072_1248 (size=68) 2024-12-12T01:18:36,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742073_1249 (size=68) 2024-12-12T01:18:36,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742072_1248 (size=68) 2024-12-12T01:18:36,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742073_1249 (size=68) 2024-12-12T01:18:36,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742073_1249 (size=68) 2024-12-12T01:18:36,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 2024-12-12T01:18:36,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-12T01:18:36,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 
2024-12-12T01:18:36,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-12T01:18:36,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-12T01:18:36,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-12T01:18:36,103 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:36,103 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:36,103 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:36,103 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:36,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5a9d347d075abc43e7359e9246fa23f7 in 177 msec 2024-12-12T01:18:36,108 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=120, resume processing ppid=119 2024-12-12T01:18:36,108 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf in 177 msec 2024-12-12T01:18:36,108 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:18:36,109 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:18:36,110 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:18:36,110 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-12T01:18:36,111 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-12T01:18:36,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39595 is added to blk_1073742074_1250 (size=543) 2024-12-12T01:18:36,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742074_1250 (size=543) 2024-12-12T01:18:36,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742074_1250 (size=543) 2024-12-12T01:18:36,153 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:18:36,158 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:18:36,159 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-12T01:18:36,160 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:18:36,160 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-12T01:18:36,162 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 252 msec 2024-12-12T01:18:36,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-12T01:18:36,225 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-12T01:18:36,230 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='0e9749ab0796a55a62b34e2276ddd8bd9', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:18:36,231 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='1e4aa6f6ace217c76b7724351e608b9ca', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 
2024-12-12T01:18:36,232 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='27dd367ac2d7f3b2478ad18db102439da', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:18:36,233 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='3df889e1393791439f1c4edc490e24738', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:18:36,234 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='4af75dc819b6c6431feab560febd54365', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:18:36,235 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='5911d58d5eaa8f1bc2104ac73863cb516', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:18:36,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33883 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:18:36,240 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:18:36,242 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-12T01:18:36,245 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-12T01:18:36,245 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 
2024-12-12T01:18:36,245 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:18:36,247 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-12T01:18:36,253 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-12T01:18:36,262 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-12T01:18:36,264 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-12T01:18:36,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966316264 (current time:1733966316264). 2024-12-12T01:18:36,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:18:36,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-12T01:18:36,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:18:36,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@563a329, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:36,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:18:36,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:18:36,266 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:18:36,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:18:36,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:18:36,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@296953dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-12T01:18:36,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:18:36,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:18:36,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:36,267 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35530, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:18:36,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34c8609a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:36,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:18:36,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:18:36,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:18:36,270 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59818, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:18:36,271 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 
2024-12-12T01:18:36,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:18:36,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:36,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:36,271 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:18:36,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6251e809, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:36,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:18:36,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:18:36,273 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:18:36,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:18:36,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:18:36,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@751a98e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:36,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:18:36,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:18:36,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:36,274 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35554, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:18:36,274 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@511385bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:18:36,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:18:36,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:18:36,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:18:36,276 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59826, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:18:36,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:18:36,279 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 
2024-12-12T01:18:36,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:18:36,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:36,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:18:36,280 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:18:36,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-12T01:18:36,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-12T01:18:36,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-12T01:18:36,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-12T01:18:36,283 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:18:36,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-12T01:18:36,284 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:18:36,286 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:18:36,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742075_1251 (size=156) 2024-12-12T01:18:36,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742075_1251 (size=156) 2024-12-12T01:18:36,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742075_1251 (size=156) 2024-12-12T01:18:36,314 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:18:36,314 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a9d347d075abc43e7359e9246fa23f7}] 2024-12-12T01:18:36,316 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:36,316 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:36,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-12T01:18:36,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-12T01:18:36,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-12T01:18:36,468 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 2024-12-12T01:18:36,468 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 2024-12-12T01:18:36,469 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 7c8ee88d2e52c9d5777852576a25ebbf 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-12T01:18:36,469 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing 5a9d347d075abc43e7359e9246fa23f7 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-12T01:18:36,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/.tmp/cf/70cc5dea071a4469977f7ab855bc37e7 is 71, key is 23f721001bbf952df09a4403accae6a0/cf:q/1733966316240/Put/seqid=0 2024-12-12T01:18:36,488 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/.tmp/cf/78e5499317494521aa0db686376ebbe2 is 71, key is 059e9a1130ebbfb129f47c3fbed0d0c1/cf:q/1733966316237/Put/seqid=0 2024-12-12T01:18:36,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742076_1252 (size=8326) 2024-12-12T01:18:36,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742076_1252 (size=8326) 2024-12-12T01:18:36,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742076_1252 (size=8326) 2024-12-12T01:18:36,492 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/.tmp/cf/70cc5dea071a4469977f7ab855bc37e7 2024-12-12T01:18:36,494 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742077_1253 (size=5286) 2024-12-12T01:18:36,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742077_1253 (size=5286) 2024-12-12T01:18:36,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742077_1253 (size=5286) 2024-12-12T01:18:36,496 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/.tmp/cf/78e5499317494521aa0db686376ebbe2 2024-12-12T01:18:36,499 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/.tmp/cf/70cc5dea071a4469977f7ab855bc37e7 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/cf/70cc5dea071a4469977f7ab855bc37e7 2024-12-12T01:18:36,501 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/.tmp/cf/78e5499317494521aa0db686376ebbe2 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/cf/78e5499317494521aa0db686376ebbe2 2024-12-12T01:18:36,505 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/cf/70cc5dea071a4469977f7ab855bc37e7, entries=47, sequenceid=6, filesize=8.1 K 2024-12-12T01:18:36,506 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/cf/78e5499317494521aa0db686376ebbe2, entries=3, sequenceid=6, filesize=5.2 K 2024-12-12T01:18:36,506 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 5a9d347d075abc43e7359e9246fa23f7 in 38ms, sequenceid=6, compaction requested=false 2024-12-12T01:18:36,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-12T01:18:36,507 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 7c8ee88d2e52c9d5777852576a25ebbf in 38ms, sequenceid=6, compaction requested=false 2024-12-12T01:18:36,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for 5a9d347d075abc43e7359e9246fa23f7: 2024-12-12T01:18:36,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. for snaptb0-testConsecutiveExports completed. 2024-12-12T01:18:36,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 7c8ee88d2e52c9d5777852576a25ebbf: 2024-12-12T01:18:36,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. for snaptb0-testConsecutiveExports completed. 2024-12-12T01:18:36,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-12T01:18:36,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-12T01:18:36,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:18:36,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:18:36,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/cf/70cc5dea071a4469977f7ab855bc37e7] hfiles 2024-12-12T01:18:36,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/cf/78e5499317494521aa0db686376ebbe2] hfiles 2024-12-12T01:18:36,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/cf/70cc5dea071a4469977f7ab855bc37e7 for snapshot=snaptb0-testConsecutiveExports 2024-12-12T01:18:36,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/cf/78e5499317494521aa0db686376ebbe2 for snapshot=snaptb0-testConsecutiveExports 2024-12-12T01:18:36,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742078_1254 (size=107) 2024-12-12T01:18:36,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742078_1254 (size=107) 2024-12-12T01:18:36,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742078_1254 (size=107) 2024-12-12T01:18:36,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 
2024-12-12T01:18:36,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-12T01:18:36,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-12T01:18:36,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:36,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742079_1255 (size=107) 2024-12-12T01:18:36,519 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:18:36,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742079_1255 (size=107) 2024-12-12T01:18:36,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 2024-12-12T01:18:36,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-12-12T01:18:36,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-12-12T01:18:36,521 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:36,521 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:18:36,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf in 206 msec 2024-12-12T01:18:36,524 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=124, resume processing ppid=122 2024-12-12T01:18:36,524 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5a9d347d075abc43e7359e9246fa23f7 in 208 msec 2024-12-12T01:18:36,524 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:18:36,525 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:18:36,525 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:18:36,526 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-12T01:18:36,526 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-12T01:18:36,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742079_1255 (size=107) 2024-12-12T01:18:36,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742080_1256 (size=621) 2024-12-12T01:18:36,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742080_1256 (size=621) 2024-12-12T01:18:36,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742080_1256 (size=621) 2024-12-12T01:18:36,540 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:18:36,545 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:18:36,546 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-12T01:18:36,547 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:18:36,547 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-12T01:18:36,549 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 266 msec 2024-12-12T01:18:36,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done 
pid=122 2024-12-12T01:18:36,605 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-12T01:18:36,605 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605 2024-12-12T01:18:36,605 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605, srcFsUri=hdfs://localhost:33353, srcDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:18:36,636 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33353, inputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:18:36,636 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@24f86f6a, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-12T01:18:36,638 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
2024-12-12T01:18:36,642 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-12T01:18:36,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:36,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:36,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:37,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-12902743195763308672.jar 2024-12-12T01:18:37,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:37,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:37,817 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-1669525791346362935.jar 2024-12-12T01:18:37,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:37,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:37,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:37,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:37,819 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:37,819 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:37,819 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T01:18:37,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T01:18:37,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T01:18:37,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T01:18:37,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T01:18:37,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T01:18:37,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T01:18:37,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T01:18:37,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T01:18:37,821 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T01:18:37,822 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T01:18:37,822 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:18:37,822 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:18:37,822 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:18:37,823 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:18:37,823 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:18:37,823 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:18:37,823 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:18:37,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742081_1257 (size=131440) 2024-12-12T01:18:37,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742081_1257 (size=131440) 2024-12-12T01:18:37,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742081_1257 (size=131440) 2024-12-12T01:18:37,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742082_1258 (size=4188619) 2024-12-12T01:18:37,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42945 is added to blk_1073742082_1258 (size=4188619) 2024-12-12T01:18:37,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742082_1258 (size=4188619) 2024-12-12T01:18:37,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742083_1259 (size=1323991) 2024-12-12T01:18:37,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742083_1259 (size=1323991) 2024-12-12T01:18:37,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742083_1259 (size=1323991) 2024-12-12T01:18:37,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742084_1260 (size=6426224) 2024-12-12T01:18:37,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742084_1260 (size=6426224) 2024-12-12T01:18:37,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742084_1260 (size=6426224) 2024-12-12T01:18:37,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742085_1261 (size=903541) 2024-12-12T01:18:37,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742085_1261 (size=903541) 2024-12-12T01:18:37,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742085_1261 (size=903541) 2024-12-12T01:18:37,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742086_1262 (size=8360360) 2024-12-12T01:18:37,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742086_1262 (size=8360360) 2024-12-12T01:18:37,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742086_1262 (size=8360360) 2024-12-12T01:18:37,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742087_1263 (size=1877034) 2024-12-12T01:18:37,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742087_1263 (size=1877034) 2024-12-12T01:18:37,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742087_1263 (size=1877034) 2024-12-12T01:18:38,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742088_1264 (size=77835) 2024-12-12T01:18:38,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742088_1264 (size=77835) 2024-12-12T01:18:38,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742088_1264 (size=77835) 2024-12-12T01:18:38,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39595 is added to blk_1073742089_1265 (size=30949) 2024-12-12T01:18:38,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742089_1265 (size=30949) 2024-12-12T01:18:38,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742089_1265 (size=30949) 2024-12-12T01:18:38,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742090_1266 (size=1597213) 2024-12-12T01:18:38,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742090_1266 (size=1597213) 2024-12-12T01:18:38,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742090_1266 (size=1597213) 2024-12-12T01:18:38,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742091_1267 (size=4695811) 2024-12-12T01:18:38,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742091_1267 (size=4695811) 2024-12-12T01:18:38,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742091_1267 (size=4695811) 2024-12-12T01:18:38,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742092_1268 (size=232957) 2024-12-12T01:18:38,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742092_1268 (size=232957) 2024-12-12T01:18:38,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742092_1268 (size=232957) 2024-12-12T01:18:38,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742093_1269 (size=127628) 2024-12-12T01:18:38,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742093_1269 (size=127628) 2024-12-12T01:18:38,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742093_1269 (size=127628) 2024-12-12T01:18:38,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742094_1270 (size=20406) 2024-12-12T01:18:38,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742094_1270 (size=20406) 2024-12-12T01:18:38,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742094_1270 (size=20406) 2024-12-12T01:18:38,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742095_1271 (size=443172) 2024-12-12T01:18:38,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742095_1271 (size=443172) 2024-12-12T01:18:38,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742095_1271 (size=443172) 2024-12-12T01:18:38,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742096_1272 (size=5175431) 2024-12-12T01:18:38,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742096_1272 (size=5175431) 2024-12-12T01:18:38,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742096_1272 (size=5175431) 2024-12-12T01:18:38,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742097_1273 (size=217634) 2024-12-12T01:18:38,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742097_1273 (size=217634) 2024-12-12T01:18:38,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742097_1273 (size=217634) 2024-12-12T01:18:38,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742098_1274 (size=1832290) 2024-12-12T01:18:38,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742098_1274 (size=1832290) 2024-12-12T01:18:38,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742098_1274 (size=1832290) 2024-12-12T01:18:38,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742099_1275 (size=322274) 2024-12-12T01:18:38,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742099_1275 (size=322274) 2024-12-12T01:18:38,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742099_1275 (size=322274) 2024-12-12T01:18:38,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742100_1276 (size=503880) 2024-12-12T01:18:38,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742100_1276 (size=503880) 2024-12-12T01:18:38,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742100_1276 (size=503880) 2024-12-12T01:18:38,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742101_1277 (size=29229) 2024-12-12T01:18:38,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742101_1277 (size=29229) 2024-12-12T01:18:38,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742101_1277 (size=29229) 2024-12-12T01:18:38,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742102_1278 (size=24096) 2024-12-12T01:18:38,673 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742102_1278 (size=24096) 2024-12-12T01:18:38,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742102_1278 (size=24096) 2024-12-12T01:18:38,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742103_1279 (size=111872) 2024-12-12T01:18:38,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742103_1279 (size=111872) 2024-12-12T01:18:38,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742103_1279 (size=111872) 2024-12-12T01:18:38,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742104_1280 (size=45609) 2024-12-12T01:18:38,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742104_1280 (size=45609) 2024-12-12T01:18:38,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742104_1280 (size=45609) 2024-12-12T01:18:38,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742105_1281 (size=136454) 2024-12-12T01:18:38,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742105_1281 (size=136454) 2024-12-12T01:18:38,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742105_1281 (size=136454) 2024-12-12T01:18:38,746 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-12T01:18:38,748 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-12T01:18:38,750 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-12T01:18:38,750 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-12T01:18:38,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742106_1282 (size=441) 2024-12-12T01:18:38,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742106_1282 (size=441) 2024-12-12T01:18:38,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742106_1282 (size=441) 2024-12-12T01:18:38,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742107_1283 (size=21) 2024-12-12T01:18:38,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742107_1283 (size=21) 2024-12-12T01:18:38,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742107_1283 (size=21) 2024-12-12T01:18:38,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742108_1284 (size=304047) 2024-12-12T01:18:38,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742108_1284 (size=304047) 2024-12-12T01:18:38,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742108_1284 (size=304047) 2024-12-12T01:18:39,698 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:18:39,698 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T01:18:39,701 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0004_000001 (auth:SIMPLE) from 127.0.0.1:59974 2024-12-12T01:18:39,717 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_0/usercache/jenkins/appcache/application_1733966109028_0004/container_1733966109028_0004_01_000001/launch_container.sh] 2024-12-12T01:18:39,717 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_0/usercache/jenkins/appcache/application_1733966109028_0004/container_1733966109028_0004_01_000001/container_tokens] 2024-12-12T01:18:39,717 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_0/usercache/jenkins/appcache/application_1733966109028_0004/container_1733966109028_0004_01_000001/sysfs] 2024-12-12T01:18:40,478 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:18:40,567 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0005_000001 (auth:SIMPLE) from 127.0.0.1:54396 2024-12-12T01:18:40,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-12T01:18:40,712 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-12T01:18:40,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-12T01:18:46,215 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:18:46,822 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0005_000001 (auth:SIMPLE) from 127.0.0.1:45964 2024-12-12T01:18:47,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742109_1285 (size=349745) 2024-12-12T01:18:47,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742109_1285 (size=349745) 2024-12-12T01:18:47,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742109_1285 (size=349745) 2024-12-12T01:18:49,443 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth 
successful for appattempt_1733966109028_0005_000001 (auth:SIMPLE) from 127.0.0.1:39140 2024-12-12T01:18:49,455 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0005_000001 (auth:SIMPLE) from 127.0.0.1:49244 2024-12-12T01:18:58,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742110_1286 (size=22236) 2024-12-12T01:18:58,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742110_1286 (size=22236) 2024-12-12T01:18:58,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742110_1286 (size=22236) 2024-12-12T01:18:58,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742111_1287 (size=463) 2024-12-12T01:18:58,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742111_1287 (size=463) 2024-12-12T01:18:58,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742111_1287 (size=463) 2024-12-12T01:18:58,123 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0005/container_1733966109028_0005_01_000003/launch_container.sh] 2024-12-12T01:18:58,124 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0005/container_1733966109028_0005_01_000003/container_tokens] 2024-12-12T01:18:58,124 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0005/container_1733966109028_0005_01_000003/sysfs] 2024-12-12T01:18:58,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742112_1288 (size=22236) 2024-12-12T01:18:58,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742112_1288 (size=22236) 2024-12-12T01:18:58,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742112_1288 (size=22236) 2024-12-12T01:18:58,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742113_1289 (size=349745) 2024-12-12T01:18:58,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742113_1289 (size=349745) 2024-12-12T01:18:58,147 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742113_1289 (size=349745) 2024-12-12T01:18:58,166 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0005_000001 (auth:SIMPLE) from 127.0.0.1:49440 2024-12-12T01:18:58,910 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:18:59,383 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-12T01:18:59,383 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-12T01:18:59,385 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-12T01:18:59,386 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-12T01:18:59,386 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-12T01:18:59,386 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-12T01:18:59,387 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-12T01:18:59,387 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-12T01:18:59,387 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@24f86f6a in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-12T01:18:59,387 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-12T01:18:59,387 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-12T01:18:59,389 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, 
tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605, srcFsUri=hdfs://localhost:33353, srcDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:18:59,420 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33353, inputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:18:59,420 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@24f86f6a, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-12T01:18:59,422 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-12T01:18:59,426 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-12T01:18:59,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:59,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:18:59,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:00,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-7740490114230224014.jar 2024-12-12T01:19:00,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:00,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:00,504 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-6572348948880930225.jar 2024-12-12T01:19:00,505 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:00,505 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:00,505 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:00,505 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:00,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:00,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:00,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T01:19:00,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T01:19:00,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T01:19:00,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T01:19:00,507 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T01:19:00,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T01:19:00,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T01:19:00,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T01:19:00,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T01:19:00,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T01:19:00,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T01:19:00,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:19:00,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:19:00,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:19:00,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:19:00,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:19:00,509 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:19:00,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:19:00,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742114_1290 (size=131440) 2024-12-12T01:19:00,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742114_1290 (size=131440) 2024-12-12T01:19:00,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742114_1290 (size=131440) 2024-12-12T01:19:00,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742115_1291 (size=4188619) 2024-12-12T01:19:00,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742115_1291 (size=4188619) 2024-12-12T01:19:00,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742115_1291 (size=4188619) 2024-12-12T01:19:00,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742116_1292 (size=1323991) 2024-12-12T01:19:00,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742116_1292 (size=1323991) 2024-12-12T01:19:00,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742116_1292 (size=1323991) 2024-12-12T01:19:00,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742117_1293 (size=903541) 2024-12-12T01:19:00,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742117_1293 (size=903541) 2024-12-12T01:19:00,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742117_1293 (size=903541) 2024-12-12T01:19:00,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742118_1294 (size=8360360) 2024-12-12T01:19:00,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742118_1294 (size=8360360) 2024-12-12T01:19:00,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742118_1294 (size=8360360) 2024-12-12T01:19:00,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742119_1295 (size=1877034) 2024-12-12T01:19:00,644 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742119_1295 (size=1877034) 2024-12-12T01:19:00,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742119_1295 (size=1877034) 2024-12-12T01:19:00,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742120_1296 (size=77835) 2024-12-12T01:19:00,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742120_1296 (size=77835) 2024-12-12T01:19:00,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742120_1296 (size=77835) 2024-12-12T01:19:00,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742121_1297 (size=443172) 2024-12-12T01:19:00,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742121_1297 (size=443172) 2024-12-12T01:19:00,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742121_1297 (size=443172) 2024-12-12T01:19:00,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742122_1298 (size=30949) 2024-12-12T01:19:00,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742122_1298 (size=30949) 2024-12-12T01:19:00,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742122_1298 (size=30949) 2024-12-12T01:19:00,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742123_1299 (size=1597213) 2024-12-12T01:19:00,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742123_1299 (size=1597213) 2024-12-12T01:19:00,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742123_1299 (size=1597213) 2024-12-12T01:19:00,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742124_1300 (size=4695811) 2024-12-12T01:19:00,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742124_1300 (size=4695811) 2024-12-12T01:19:00,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742124_1300 (size=4695811) 2024-12-12T01:19:00,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742125_1301 (size=232957) 2024-12-12T01:19:00,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742125_1301 (size=232957) 2024-12-12T01:19:00,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742125_1301 (size=232957) 2024-12-12T01:19:00,717 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742126_1302 (size=127628) 2024-12-12T01:19:00,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742126_1302 (size=127628) 2024-12-12T01:19:00,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742126_1302 (size=127628) 2024-12-12T01:19:00,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742127_1303 (size=20406) 2024-12-12T01:19:00,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742127_1303 (size=20406) 2024-12-12T01:19:00,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742127_1303 (size=20406) 2024-12-12T01:19:00,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742128_1304 (size=6426224) 2024-12-12T01:19:00,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742128_1304 (size=6426224) 2024-12-12T01:19:00,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742128_1304 (size=6426224) 2024-12-12T01:19:00,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742129_1305 (size=5175431) 2024-12-12T01:19:00,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742129_1305 (size=5175431) 2024-12-12T01:19:00,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742129_1305 (size=5175431) 2024-12-12T01:19:00,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742130_1306 (size=217634) 2024-12-12T01:19:00,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742130_1306 (size=217634) 2024-12-12T01:19:00,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742130_1306 (size=217634) 2024-12-12T01:19:00,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742131_1307 (size=1832290) 2024-12-12T01:19:00,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742131_1307 (size=1832290) 2024-12-12T01:19:00,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742131_1307 (size=1832290) 2024-12-12T01:19:00,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742132_1308 (size=322274) 2024-12-12T01:19:00,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742132_1308 (size=322274) 2024-12-12T01:19:00,798 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742132_1308 (size=322274) 2024-12-12T01:19:00,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742133_1309 (size=503880) 2024-12-12T01:19:00,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742133_1309 (size=503880) 2024-12-12T01:19:00,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742133_1309 (size=503880) 2024-12-12T01:19:00,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742134_1310 (size=29229) 2024-12-12T01:19:00,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742134_1310 (size=29229) 2024-12-12T01:19:00,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742134_1310 (size=29229) 2024-12-12T01:19:00,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742135_1311 (size=24096) 2024-12-12T01:19:00,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742135_1311 (size=24096) 2024-12-12T01:19:00,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742135_1311 (size=24096) 2024-12-12T01:19:00,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742136_1312 (size=111872) 2024-12-12T01:19:00,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742136_1312 (size=111872) 2024-12-12T01:19:00,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742136_1312 (size=111872) 2024-12-12T01:19:00,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742137_1313 (size=45609) 2024-12-12T01:19:00,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742137_1313 (size=45609) 2024-12-12T01:19:00,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742137_1313 (size=45609) 2024-12-12T01:19:00,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742138_1314 (size=136454) 2024-12-12T01:19:00,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742138_1314 (size=136454) 2024-12-12T01:19:00,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742138_1314 (size=136454) 2024-12-12T01:19:01,270 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
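The "For class X, using jar Y" DEBUG records above come from TableMapReduceUtil resolving, for each dependency class of the MapReduce job, the jar that contains it so it can be shipped with the job, and the JobResourceUploader WARN notes that the test never set a job jar of its own. As a rough illustration only (the driver class name below is made up; it is not part of this test), the client-side calls that typically produce this behaviour look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSketch {
    public static Job newJob() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "dependency-jar-sketch");
        // Pointing the job at the jar containing the driver class avoids the
        // "No job jar file set" warning seen above; the test harness skips this.
        job.setJarByClass(DependencyJarSketch.class);
        // Resolve the containing jar for each key/value/input/output class on the
        // job and add it to the distributed cache -- roughly what produces the
        // "For class ..., using jar ..." DEBUG lines above.
        TableMapReduceUtil.addDependencyJars(job);
        return job;
    }
}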
2024-12-12T01:19:01,272 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-12T01:19:01,273 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-12T01:19:01,273 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-12T01:19:01,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742139_1315 (size=441) 2024-12-12T01:19:01,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742139_1315 (size=441) 2024-12-12T01:19:01,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742139_1315 (size=441) 2024-12-12T01:19:01,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742140_1316 (size=21) 2024-12-12T01:19:01,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742140_1316 (size=21) 2024-12-12T01:19:01,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742140_1316 (size=21) 2024-12-12T01:19:01,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742141_1317 (size=304045) 2024-12-12T01:19:01,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742141_1317 (size=304045) 2024-12-12T01:19:01,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742141_1317 (size=304045) 2024-12-12T01:19:01,904 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_0/usercache/jenkins/appcache/application_1733966109028_0005/container_1733966109028_0005_01_000002/launch_container.sh] 2024-12-12T01:19:01,904 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_0/usercache/jenkins/appcache/application_1733966109028_0005/container_1733966109028_0005_01_000002/container_tokens] 2024-12-12T01:19:01,904 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_0/usercache/jenkins/appcache/application_1733966109028_0005/container_1733966109028_0005_01_000002/sysfs] 2024-12-12T01:19:04,264 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T01:19:04,264 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:19:04,268 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0005_000001 (auth:SIMPLE) from 127.0.0.1:41210 2024-12-12T01:19:04,284 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0005/container_1733966109028_0005_01_000001/launch_container.sh] 2024-12-12T01:19:04,284 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0005/container_1733966109028_0005_01_000001/container_tokens] 2024-12-12T01:19:04,284 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0005/container_1733966109028_0005_01_000001/sysfs] 2024-12-12T01:19:05,093 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0006_000001 (auth:SIMPLE) from 127.0.0.1:59038 2024-12-12T01:19:06,436 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7c8ee88d2e52c9d5777852576a25ebbf changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:19:06,436 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5a9d347d075abc43e7359e9246fa23f7 changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:19:11,648 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0006_000001 (auth:SIMPLE) from 127.0.0.1:55336 2024-12-12T01:19:12,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742142_1318 (size=349743) 2024-12-12T01:19:12,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742142_1318 (size=349743) 2024-12-12T01:19:12,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742142_1318 (size=349743) 2024-12-12T01:19:14,400 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0006_000001 (auth:SIMPLE) from 127.0.0.1:46186 2024-12-12T01:19:14,411 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0006_000001 (auth:SIMPLE) from 127.0.0.1:47380 2024-12-12T01:19:20,606 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7c8ee88d2e52c9d5777852576a25ebbf, had cached 0 bytes from a total of 5286 2024-12-12T01:19:20,607 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5a9d347d075abc43e7359e9246fa23f7, had cached 0 bytes from a total of 8326 2024-12-12T01:19:22,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742143_1319 (size=21201) 2024-12-12T01:19:22,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742143_1319 (size=21201) 2024-12-12T01:19:22,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742143_1319 (size=21201) 2024-12-12T01:19:22,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742144_1320 (size=463) 2024-12-12T01:19:22,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742144_1320 (size=463) 2024-12-12T01:19:22,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742144_1320 (size=463) 2024-12-12T01:19:22,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742145_1321 (size=21201) 2024-12-12T01:19:22,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742145_1321 (size=21201) 2024-12-12T01:19:22,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742145_1321 (size=21201) 2024-12-12T01:19:22,235 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0006/container_1733966109028_0006_01_000003/launch_container.sh] 2024-12-12T01:19:22,235 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0006/container_1733966109028_0006_01_000003/container_tokens] 2024-12-12T01:19:22,235 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_3/usercache/jenkins/appcache/application_1733966109028_0006/container_1733966109028_0006_01_000003/sysfs] 2024-12-12T01:19:22,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742146_1322 (size=349743) 2024-12-12T01:19:22,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742146_1322 
(size=349743) 2024-12-12T01:19:22,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742146_1322 (size=349743) 2024-12-12T01:19:22,263 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0006_000001 (auth:SIMPLE) from 127.0.0.1:47388 2024-12-12T01:19:23,823 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-12T01:19:23,823 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-12T01:19:23,825 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-12T01:19:23,825 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-12T01:19:23,826 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-12T01:19:23,826 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-12T01:19:23,827 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-12T01:19:23,827 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-12T01:19:23,827 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@24f86f6a in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-12T01:19:23,828 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-12T01:19:23,828 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966316605/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-12T01:19:23,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-12T01:19:23,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 
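The records above show ExportSnapshot loading the hfile list for 'snaptb0-testConsecutiveExports', splitting it across mappers, finalizing the export, and then the test re-listing the .snapshotinfo and data.manifest files in both the HDFS snapshot root and the local-export directory. A hedged sketch of how such an export is commonly driven through the ExportSnapshot tool follows; the destination URI below is illustrative, not the path used by this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copy the snapshot manifest and the hfiles it references to the target
        // filesystem; the "export split=..." DEBUG lines above reflect how the
        // file list is partitioned across mappers.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            "-copy-to", "file:///tmp/local-export",   // illustrative destination
            "-overwrite"
        });
        System.exit(rc);
    }
}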
2024-12-12T01:19:23,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-12T01:19:23,850 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966363850"}]},"ts":"1733966363850"} 2024-12-12T01:19:23,852 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-12T01:19:23,853 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-12T01:19:23,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-12T01:19:23,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7c8ee88d2e52c9d5777852576a25ebbf, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5a9d347d075abc43e7359e9246fa23f7, UNASSIGN}] 2024-12-12T01:19:23,856 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7c8ee88d2e52c9d5777852576a25ebbf, UNASSIGN 2024-12-12T01:19:23,856 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5a9d347d075abc43e7359e9246fa23f7, UNASSIGN 2024-12-12T01:19:23,857 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=7c8ee88d2e52c9d5777852576a25ebbf, regionState=CLOSING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:19:23,857 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=5a9d347d075abc43e7359e9246fa23f7, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:19:23,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7c8ee88d2e52c9d5777852576a25ebbf, UNASSIGN because future has completed 2024-12-12T01:19:23,859 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:19:23,859 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:19:23,859 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure 
table=testtb-testConsecutiveExports, region=5a9d347d075abc43e7359e9246fa23f7, UNASSIGN because future has completed 2024-12-12T01:19:23,862 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:19:23,862 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5a9d347d075abc43e7359e9246fa23f7, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:19:23,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-12T01:19:24,012 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:19:24,013 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:19:24,013 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 7c8ee88d2e52c9d5777852576a25ebbf, disabling compactions & flushes 2024-12-12T01:19:24,013 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 2024-12-12T01:19:24,013 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 2024-12-12T01:19:24,013 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. after waiting 0 ms 2024-12-12T01:19:24,013 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 2024-12-12T01:19:24,015 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:19:24,015 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:19:24,015 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing 5a9d347d075abc43e7359e9246fa23f7, disabling compactions & flushes 2024-12-12T01:19:24,015 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 2024-12-12T01:19:24,015 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 
2024-12-12T01:19:24,015 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. after waiting 0 ms 2024-12-12T01:19:24,015 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 2024-12-12T01:19:24,018 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:19:24,019 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:19:24,019 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf. 2024-12-12T01:19:24,019 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 7c8ee88d2e52c9d5777852576a25ebbf: Waiting for close lock at 1733966364013Running coprocessor pre-close hooks at 1733966364013Disabling compacts and flushes for region at 1733966364013Disabling writes for close at 1733966364013Writing region close event to WAL at 1733966364014 (+1 ms)Running coprocessor post-close hooks at 1733966364019 (+5 ms)Closed at 1733966364019 2024-12-12T01:19:24,019 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:19:24,020 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:19:24,020 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7. 
2024-12-12T01:19:24,020 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for 5a9d347d075abc43e7359e9246fa23f7: Waiting for close lock at 1733966364015Running coprocessor pre-close hooks at 1733966364015Disabling compacts and flushes for region at 1733966364015Disabling writes for close at 1733966364015Writing region close event to WAL at 1733966364016 (+1 ms)Running coprocessor post-close hooks at 1733966364020 (+4 ms)Closed at 1733966364020 2024-12-12T01:19:24,021 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:19:24,021 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=7c8ee88d2e52c9d5777852576a25ebbf, regionState=CLOSED 2024-12-12T01:19:24,021 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed 5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:19:24,022 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=5a9d347d075abc43e7359e9246fa23f7, regionState=CLOSED 2024-12-12T01:19:24,023 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:19:24,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5a9d347d075abc43e7359e9246fa23f7, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:19:24,025 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-12-12T01:19:24,025 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 7c8ee88d2e52c9d5777852576a25ebbf, server=36236b0d7605,33883,1733966101479 in 165 msec 2024-12-12T01:19:24,027 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=7c8ee88d2e52c9d5777852576a25ebbf, UNASSIGN in 170 msec 2024-12-12T01:19:24,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=128 2024-12-12T01:19:24,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure 5a9d347d075abc43e7359e9246fa23f7, server=36236b0d7605,38295,1733966101221 in 163 msec 2024-12-12T01:19:24,028 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=126 2024-12-12T01:19:24,028 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5a9d347d075abc43e7359e9246fa23f7, UNASSIGN in 172 msec 2024-12-12T01:19:24,030 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-12T01:19:24,030 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 176 msec 2024-12-12T01:19:24,031 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966364031"}]},"ts":"1733966364031"} 2024-12-12T01:19:24,033 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-12T01:19:24,033 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-12T01:19:24,034 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 187 msec 2024-12-12T01:19:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-12T01:19:24,165 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-12T01:19:24,166 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-12T01:19:24,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-12T01:19:24,168 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-12T01:19:24,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-12T01:19:24,168 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-12T01:19:24,171 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-12T01:19:24,172 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:19:24,173 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:19:24,174 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/recovered.edits] 2024-12-12T01:19:24,174 DEBUG [HFileArchiver-11 {}] 
backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/recovered.edits] 2024-12-12T01:19:24,178 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/cf/70cc5dea071a4469977f7ab855bc37e7 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/cf/70cc5dea071a4469977f7ab855bc37e7 2024-12-12T01:19:24,178 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/cf/78e5499317494521aa0db686376ebbe2 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/cf/78e5499317494521aa0db686376ebbe2 2024-12-12T01:19:24,181 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf/recovered.edits/9.seqid 2024-12-12T01:19:24,181 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7/recovered.edits/9.seqid 2024-12-12T01:19:24,181 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/7c8ee88d2e52c9d5777852576a25ebbf 2024-12-12T01:19:24,181 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testConsecutiveExports/5a9d347d075abc43e7359e9246fa23f7 2024-12-12T01:19:24,181 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-12T01:19:24,183 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-12T01:19:24,185 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-12T01:19:24,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, 
quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T01:19:24,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T01:19:24,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T01:19:24,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T01:19:24,190 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-12T01:19:24,190 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-12T01:19:24,190 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-12T01:19:24,190 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-12T01:19:24,190 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-12T01:19:24,191 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-12T01:19:24,192 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testConsecutiveExports' from region states. 2024-12-12T01:19:24,192 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966364192"}]},"ts":"9223372036854775807"} 2024-12-12T01:19:24,192 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966364192"}]},"ts":"9223372036854775807"} 2024-12-12T01:19:24,194 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-12T01:19:24,194 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7c8ee88d2e52c9d5777852576a25ebbf, NAME => 'testtb-testConsecutiveExports,,1733966315250.7c8ee88d2e52c9d5777852576a25ebbf.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5a9d347d075abc43e7359e9246fa23f7, NAME => 'testtb-testConsecutiveExports,1,1733966315250.5a9d347d075abc43e7359e9246fa23f7.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T01:19:24,194 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testConsecutiveExports' as deleted. 
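The sequence logged from 01:19:23,846 onwards (DisableTableProcedure pid=125, DeleteTableProcedure pid=131, and the two snapshot deletions shortly after) is the end-of-test cleanup issued by the client. A minimal sketch of the equivalent Admin API calls, with the table and snapshot names taken from the log and a generic connection setup assumed:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupSketch {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testConsecutiveExports");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // DisableTableProcedure: regions are unassigned and the table state
            // moves DISABLING -> DISABLED in hbase:meta, as in pid=125..130 above.
            if (admin.isTableEnabled(table)) {
                admin.disableTable(table);
            }
            // DeleteTableProcedure: region directories are archived and the table
            // is removed from hbase:meta and the ACL znode, as in pid=131 above.
            admin.deleteTable(table);
            // Matches the two "delete name: ..." snapshot requests that follow.
            admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
            admin.deleteSnapshot("snaptb0-testConsecutiveExports");
        }
    }
}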
2024-12-12T01:19:24,194 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966364194"}]},"ts":"9223372036854775807"} 2024-12-12T01:19:24,195 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-12T01:19:24,196 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-12T01:19:24,197 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 30 msec 2024-12-12T01:19:24,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T01:19:24,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T01:19:24,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T01:19:24,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:24,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-12T01:19:24,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:24,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:24,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-12T01:19:24,199 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-12T01:19:24,199 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-12T01:19:24,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:24,205 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-12T01:19:24,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-12T01:19:24,208 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-12T01:19:24,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-12T01:19:24,232 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=798 (was 797) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36795 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:37989 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-5037 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 1370) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_416211837_1 at /127.0.0.1:58878 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:60090 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:54870 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:42496 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37989 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=778 (was 788), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=828 (was 746) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 17), AvailableMemoryMB=5779 (was 6140) 2024-12-12T01:19:24,232 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-12T01:19:24,254 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=798, OpenFileDescriptor=778, MaxFileDescriptor=1048576, SystemLoadAverage=828, ProcessCount=17, AvailableMemoryMB=5779 2024-12-12T01:19:24,254 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-12T01:19:24,256 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:19:24,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:24,259 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:19:24,259 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:24,259 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-12-12T01:19:24,260 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:19:24,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-12T01:19:24,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742147_1323 (size=422) 2024-12-12T01:19:24,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742147_1323 (size=422) 2024-12-12T01:19:24,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742147_1323 (size=422) 2024-12-12T01:19:24,278 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6bf52e50c78a758898cb8b5b8d262309, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => 
{REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:19:24,278 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 27b918ec566d8a122c845b476f66eb50, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:19:24,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742149_1325 (size=83) 2024-12-12T01:19:24,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742148_1324 (size=83) 2024-12-12T01:19:24,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742149_1325 (size=83) 2024-12-12T01:19:24,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742148_1324 (size=83) 2024-12-12T01:19:24,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742148_1324 (size=83) 2024-12-12T01:19:24,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742149_1325 (size=83) 2024-12-12T01:19:24,287 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:24,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 6bf52e50c78a758898cb8b5b8d262309, disabling compactions & flushes 2024-12-12T01:19:24,288 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 2024-12-12T01:19:24,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 
2024-12-12T01:19:24,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. after waiting 0 ms 2024-12-12T01:19:24,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 2024-12-12T01:19:24,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:24,288 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 2024-12-12T01:19:24,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6bf52e50c78a758898cb8b5b8d262309: Waiting for close lock at 1733966364288Disabling compacts and flushes for region at 1733966364288Disabling writes for close at 1733966364288Writing region close event to WAL at 1733966364288Closed at 1733966364288 2024-12-12T01:19:24,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 27b918ec566d8a122c845b476f66eb50, disabling compactions & flushes 2024-12-12T01:19:24,288 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 2024-12-12T01:19:24,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 2024-12-12T01:19:24,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. after waiting 0 ms 2024-12-12T01:19:24,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 2024-12-12T01:19:24,288 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 
2024-12-12T01:19:24,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 27b918ec566d8a122c845b476f66eb50: Waiting for close lock at 1733966364288Disabling compacts and flushes for region at 1733966364288Disabling writes for close at 1733966364288Writing region close event to WAL at 1733966364288Closed at 1733966364288 2024-12-12T01:19:24,290 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:19:24,290 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733966364290"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966364290"}]},"ts":"1733966364290"} 2024-12-12T01:19:24,290 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733966364290"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966364290"}]},"ts":"1733966364290"} 2024-12-12T01:19:24,293 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-12T01:19:24,294 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:19:24,294 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966364294"}]},"ts":"1733966364294"} 2024-12-12T01:19:24,296 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-12T01:19:24,296 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:19:24,297 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:19:24,297 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:19:24,297 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:19:24,297 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:19:24,297 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:19:24,297 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:19:24,297 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:19:24,297 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:19:24,297 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:19:24,297 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:19:24,297 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=6bf52e50c78a758898cb8b5b8d262309, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=27b918ec566d8a122c845b476f66eb50, ASSIGN}] 2024-12-12T01:19:24,298 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=27b918ec566d8a122c845b476f66eb50, ASSIGN 2024-12-12T01:19:24,298 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=6bf52e50c78a758898cb8b5b8d262309, ASSIGN 2024-12-12T01:19:24,299 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=27b918ec566d8a122c845b476f66eb50, ASSIGN; state=OFFLINE, location=36236b0d7605,44743,1733966101408; forceNewPlan=false, retain=false 2024-12-12T01:19:24,299 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=6bf52e50c78a758898cb8b5b8d262309, ASSIGN; state=OFFLINE, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:19:24,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-12T01:19:24,449 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-12T01:19:24,450 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=6bf52e50c78a758898cb8b5b8d262309, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:19:24,450 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=27b918ec566d8a122c845b476f66eb50, regionState=OPENING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:19:24,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=6bf52e50c78a758898cb8b5b8d262309, ASSIGN because future has completed 2024-12-12T01:19:24,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6bf52e50c78a758898cb8b5b8d262309, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:19:24,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=27b918ec566d8a122c845b476f66eb50, ASSIGN because future has completed 2024-12-12T01:19:24,453 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 27b918ec566d8a122c845b476f66eb50, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:19:24,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-12T01:19:24,607 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 2024-12-12T01:19:24,607 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 2024-12-12T01:19:24,608 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 6bf52e50c78a758898cb8b5b8d262309, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T01:19:24,608 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => 27b918ec566d8a122c845b476f66eb50, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T01:19:24,608 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 
service=AccessControlService 2024-12-12T01:19:24,608 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. service=AccessControlService 2024-12-12T01:19:24,608 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:19:24,608 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:19:24,608 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:24,608 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:24,608 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:24,608 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:24,609 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:24,609 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:24,609 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:24,609 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:24,610 INFO [StoreOpener-27b918ec566d8a122c845b476f66eb50-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:24,610 INFO [StoreOpener-6bf52e50c78a758898cb8b5b8d262309-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:24,611 INFO [StoreOpener-6bf52e50c78a758898cb8b5b8d262309-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6bf52e50c78a758898cb8b5b8d262309 columnFamilyName cf 2024-12-12T01:19:24,611 INFO [StoreOpener-27b918ec566d8a122c845b476f66eb50-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 27b918ec566d8a122c845b476f66eb50 columnFamilyName cf 2024-12-12T01:19:24,611 DEBUG [StoreOpener-6bf52e50c78a758898cb8b5b8d262309-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:24,611 DEBUG [StoreOpener-27b918ec566d8a122c845b476f66eb50-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:24,612 INFO [StoreOpener-6bf52e50c78a758898cb8b5b8d262309-1 {}] regionserver.HStore(327): Store=6bf52e50c78a758898cb8b5b8d262309/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:19:24,612 INFO [StoreOpener-27b918ec566d8a122c845b476f66eb50-1 {}] regionserver.HStore(327): Store=27b918ec566d8a122c845b476f66eb50/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:19:24,612 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:24,612 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:24,612 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:24,612 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:24,613 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:24,613 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:24,613 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:24,613 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:24,613 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:24,613 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:24,614 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:24,615 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:24,617 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:19:24,617 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 6bf52e50c78a758898cb8b5b8d262309; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59552902, jitterRate=-0.11259260773658752}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:19:24,617 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:24,618 DEBUG 
[RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 6bf52e50c78a758898cb8b5b8d262309: Running coprocessor pre-open hook at 1733966364609Writing region info on filesystem at 1733966364609Initializing all the Stores at 1733966364609Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966364609Cleaning up temporary data from old regions at 1733966364613 (+4 ms)Running coprocessor post-open hooks at 1733966364617 (+4 ms)Region opened successfully at 1733966364618 (+1 ms) 2024-12-12T01:19:24,619 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309., pid=135, masterSystemTime=1733966364604 2024-12-12T01:19:24,621 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 2024-12-12T01:19:24,621 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:19:24,621 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 
2024-12-12T01:19:24,621 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=6bf52e50c78a758898cb8b5b8d262309, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:19:24,621 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened 27b918ec566d8a122c845b476f66eb50; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66653480, jitterRate=-0.006785750389099121}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:19:24,621 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:24,621 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for 27b918ec566d8a122c845b476f66eb50: Running coprocessor pre-open hook at 1733966364609Writing region info on filesystem at 1733966364609Initializing all the Stores at 1733966364609Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966364609Cleaning up temporary data from old regions at 1733966364613 (+4 ms)Running coprocessor post-open hooks at 1733966364621 (+8 ms)Region opened successfully at 1733966364621 2024-12-12T01:19:24,622 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50., pid=136, masterSystemTime=1733966364604 2024-12-12T01:19:24,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6bf52e50c78a758898cb8b5b8d262309, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:19:24,623 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 2024-12-12T01:19:24,623 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 
2024-12-12T01:19:24,624 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=27b918ec566d8a122c845b476f66eb50, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:19:24,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 27b918ec566d8a122c845b476f66eb50, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:19:24,626 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=133 2024-12-12T01:19:24,626 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 6bf52e50c78a758898cb8b5b8d262309, server=36236b0d7605,38295,1733966101221 in 172 msec 2024-12-12T01:19:24,627 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=6bf52e50c78a758898cb8b5b8d262309, ASSIGN in 329 msec 2024-12-12T01:19:24,628 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=134 2024-12-12T01:19:24,628 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure 27b918ec566d8a122c845b476f66eb50, server=36236b0d7605,44743,1733966101408 in 174 msec 2024-12-12T01:19:24,630 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-12-12T01:19:24,630 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=27b918ec566d8a122c845b476f66eb50, ASSIGN in 331 msec 2024-12-12T01:19:24,631 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:19:24,631 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966364631"}]},"ts":"1733966364631"} 2024-12-12T01:19:24,633 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-12T01:19:24,634 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:19:24,634 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-12T01:19:24,637 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-12T01:19:24,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:24,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:24,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:24,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:24,642 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:24,642 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:24,642 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:24,644 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:24,644 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 386 msec 2024-12-12T01:19:24,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-12T01:19:24,885 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-12T01:19:24,885 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-12-12T01:19:24,885 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:19:24,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-12T01:19:24,889 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:19:24,889 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 
2024-12-12T01:19:24,889 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-12T01:19:24,892 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-12T01:19:24,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966364892 (current time:1733966364892). 2024-12-12T01:19:24,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:19:24,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-12T01:19:24,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:19:24,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25f9e492, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:24,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:19:24,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:19:24,894 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:19:24,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:19:24,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:19:24,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ebcdcc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:24,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:19:24,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:19:24,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-12T01:19:24,896 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55302, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:19:24,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45e2778d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:24,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:19:24,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:19:24,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:19:24,899 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40926, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:19:24,900 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:19:24,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:19:24,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:24,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:24,900 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:19:24,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12ee2d76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:24,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:19:24,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:19:24,902 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:19:24,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:19:24,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:19:24,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45f3ed26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:24,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:19:24,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:19:24,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:24,903 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55308, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:19:24,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f71d3e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:24,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:19:24,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:19:24,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:19:24,906 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40932, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
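The ClusterIdFetcher and ConnectionUtils entries above trace a short-lived connection resolving the cluster id and the hbase:meta location before the ACL read. From an ordinary client the equivalent lookup goes through RegionLocator; a rough sketch under the public client API (the probed row is a placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateRegion {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
          // Resolves which region server hosts the region containing this row,
          // the same information the AsyncNonMetaRegionLocator lines report.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("1"), /* reload= */ false);
          System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
        }
      }
    }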
2024-12-12T01:19:24,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:19:24,909 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:19:24,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:19:24,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:24,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:24,909 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:19:24,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-12T01:19:24,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:19:24,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-12T01:19:24,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-12T01:19:24,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-12T01:19:24,912 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:19:24,913 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:19:24,915 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:19:24,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742150_1326 (size=215) 2024-12-12T01:19:24,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742150_1326 (size=215) 2024-12-12T01:19:24,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742150_1326 (size=215) 2024-12-12T01:19:24,922 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:19:24,922 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
6bf52e50c78a758898cb8b5b8d262309}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 27b918ec566d8a122c845b476f66eb50}] 2024-12-12T01:19:24,923 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:24,923 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:25,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-12T01:19:25,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-12-12T01:19:25,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-12-12T01:19:25,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 2024-12-12T01:19:25,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 2024-12-12T01:19:25,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for 27b918ec566d8a122c845b476f66eb50: 2024-12-12T01:19:25,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 6bf52e50c78a758898cb8b5b8d262309: 2024-12-12T01:19:25,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-12T01:19:25,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-12T01:19:25,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:25,076 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:19:25,076 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:25,076 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:19:25,076 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:19:25,076 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:19:25,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742152_1328 (size=86) 2024-12-12T01:19:25,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742152_1328 (size=86) 2024-12-12T01:19:25,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742152_1328 (size=86) 2024-12-12T01:19:25,083 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 
2024-12-12T01:19:25,083 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-12T01:19:25,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-12-12T01:19:25,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:25,084 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:25,086 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6bf52e50c78a758898cb8b5b8d262309 in 163 msec 2024-12-12T01:19:25,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742151_1327 (size=86) 2024-12-12T01:19:25,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742151_1327 (size=86) 2024-12-12T01:19:25,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742151_1327 (size=86) 2024-12-12T01:19:25,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-12T01:19:25,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 
2024-12-12T01:19:25,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-12T01:19:25,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-12-12T01:19:25,494 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:25,494 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:25,498 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=139, resume processing ppid=137 2024-12-12T01:19:25,498 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 27b918ec566d8a122c845b476f66eb50 in 573 msec 2024-12-12T01:19:25,498 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:19:25,499 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:19:25,500 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:19:25,500 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:25,501 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:25,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742153_1329 (size=597) 2024-12-12T01:19:25,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742153_1329 (size=597) 2024-12-12T01:19:25,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742153_1329 (size=597) 2024-12-12T01:19:25,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-12T01:19:25,836 WARN [ContainersLauncher #1 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_0/usercache/jenkins/appcache/application_1733966109028_0006/container_1733966109028_0006_01_000002/launch_container.sh] 2024-12-12T01:19:25,836 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_0/usercache/jenkins/appcache/application_1733966109028_0006/container_1733966109028_0006_01_000002/container_tokens] 2024-12-12T01:19:25,836 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_0/usercache/jenkins/appcache/application_1733966109028_0006/container_1733966109028_0006_01_000002/sysfs] 2024-12-12T01:19:25,930 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:19:25,934 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:19:25,935 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:25,936 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:19:25,936 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-12T01:19:25,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 1.0260 sec 2024-12-12T01:19:26,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-12T01:19:26,045 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-12T01:19:26,050 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='0b1f8a5c480261a8a902eaf20444672a3', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:19:26,051 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='1bd0b722e76e062aa503c4c509297c893', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:19:26,052 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='293c0f74af54acabcc6d91961daf4b27b', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:19:26,053 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='31d0e6b8c726e91a7dc8d0214cf3b2e27', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:19:26,054 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='446ad418e93e1be1d135deb6e41c2a521', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:19:26,058 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:19:26,061 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44743 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:19:26,062 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-12T01:19:26,065 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:26,065 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 
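The HRegion(8528) messages above note that the test data is written with the WAL disabled, which is why the log warns that the data could be lost before the next flush. A minimal sketch of such a write, assuming the standard Table/Put client API (row and value are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithoutWal {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(
                 TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL bypasses the write-ahead log, hence the
          // "Data may be lost in the event of a crash" warning above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }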
2024-12-12T01:19:26,066 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:19:26,068 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-12T01:19:26,075 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-12T01:19:26,082 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-12T01:19:26,085 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-12T01:19:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966366085 (current time:1733966366085). 2024-12-12T01:19:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:19:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-12T01:19:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:19:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ab3299, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:19:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:19:26,087 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:19:26,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:19:26,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:19:26,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3db467a7, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:26,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:19:26,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:19:26,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:26,088 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55322, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:19:26,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b8f9354, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:26,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:19:26,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:19:26,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:19:26,091 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40940, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:19:26,093 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 
2024-12-12T01:19:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:19:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:26,093 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:19:26,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@395230c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:26,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:19:26,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:19:26,095 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:19:26,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:19:26,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:19:26,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35f34603, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:26,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:19:26,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:19:26,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:26,097 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55338, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:19:26,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7475367, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:26,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:19:26,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:19:26,100 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:19:26,101 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40946, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:19:26,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:19:26,104 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 
2024-12-12T01:19:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:19:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:26,104 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:19:26,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-12T01:19:26,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
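Procedure pid=140 above is the second (non-empty) flush snapshot, and the recurring "Checking to see if procedure is done" lines are the client polling for completion. A rough client-side equivalent, assuming the Admin.snapshotAsync and isSnapshotFinished methods of the 2.x+ client (an assumption, not read from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class PollSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        SnapshotDescription snap = new SnapshotDescription(
            "snaptb0-testExportFileSystemStateWithMergeRegion",
            TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
            SnapshotType.FLUSH);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.snapshotAsync(snap);                 // master registers a SnapshotProcedure
          while (!admin.isSnapshotFinished(snap)) {  // analogous to the pid polling above
            Thread.sleep(200);
          }
        }
      }
    }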
2024-12-12T01:19:26,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-12T01:19:26,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-12T01:19:26,107 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:19:26,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-12T01:19:26,108 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:19:26,111 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:19:26,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742154_1330 (size=210) 2024-12-12T01:19:26,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742154_1330 (size=210) 2024-12-12T01:19:26,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742154_1330 (size=210) 2024-12-12T01:19:26,131 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:19:26,131 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6bf52e50c78a758898cb8b5b8d262309}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 27b918ec566d8a122c845b476f66eb50}] 2024-12-12T01:19:26,132 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:26,132 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:26,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-12T01:19:26,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-12T01:19:26,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-12T01:19:26,284 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 2024-12-12T01:19:26,284 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 2024-12-12T01:19:26,285 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 6bf52e50c78a758898cb8b5b8d262309 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-12T01:19:26,285 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing 27b918ec566d8a122c845b476f66eb50 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-12T01:19:26,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/.tmp/cf/dde3dbdfa35a4d7e841e651729a5c685 is 71, key is 0b336f51b783c442f9458207c8318923/cf:q/1733966366058/Put/seqid=0 2024-12-12T01:19:26,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/.tmp/cf/b7de830b3d30400f9bbf5f81ccbe0956 is 71, key is 11a2ee0e216d9e28ce0c5011444859dc/cf:q/1733966366060/Put/seqid=0 2024-12-12T01:19:26,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742156_1332 (size=5216) 2024-12-12T01:19:26,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742155_1331 (size=8392) 2024-12-12T01:19:26,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742155_1331 (size=8392) 2024-12-12T01:19:26,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742156_1332 (size=5216) 
2024-12-12T01:19:26,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742156_1332 (size=5216) 2024-12-12T01:19:26,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742155_1331 (size=8392) 2024-12-12T01:19:26,308 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/.tmp/cf/b7de830b3d30400f9bbf5f81ccbe0956 2024-12-12T01:19:26,308 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/.tmp/cf/dde3dbdfa35a4d7e841e651729a5c685 2024-12-12T01:19:26,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/.tmp/cf/dde3dbdfa35a4d7e841e651729a5c685 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/cf/dde3dbdfa35a4d7e841e651729a5c685 2024-12-12T01:19:26,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/.tmp/cf/b7de830b3d30400f9bbf5f81ccbe0956 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/cf/b7de830b3d30400f9bbf5f81ccbe0956 2024-12-12T01:19:26,319 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/cf/dde3dbdfa35a4d7e841e651729a5c685, entries=2, sequenceid=6, filesize=5.1 K 2024-12-12T01:19:26,319 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/cf/b7de830b3d30400f9bbf5f81ccbe0956, entries=48, sequenceid=6, filesize=8.2 K 2024-12-12T01:19:26,320 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, 
currentSize=0 B/0 for 6bf52e50c78a758898cb8b5b8d262309 in 36ms, sequenceid=6, compaction requested=false 2024-12-12T01:19:26,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-12T01:19:26,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 6bf52e50c78a758898cb8b5b8d262309: 2024-12-12T01:19:26,320 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 27b918ec566d8a122c845b476f66eb50 in 36ms, sequenceid=6, compaction requested=false 2024-12-12T01:19:26,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-12T01:19:26,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for 27b918ec566d8a122c845b476f66eb50: 2024-12-12T01:19:26,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:26,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-12T01:19:26,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:19:26,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/cf/dde3dbdfa35a4d7e841e651729a5c685] hfiles 2024-12-12T01:19:26,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/cf/dde3dbdfa35a4d7e841e651729a5c685 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:26,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:26,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:19:26,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/cf/b7de830b3d30400f9bbf5f81ccbe0956] hfiles 2024-12-12T01:19:26,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/cf/b7de830b3d30400f9bbf5f81ccbe0956 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:26,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742158_1334 (size=125) 2024-12-12T01:19:26,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742158_1334 (size=125) 2024-12-12T01:19:26,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742158_1334 (size=125) 2024-12-12T01:19:26,331 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 
2024-12-12T01:19:26,331 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-12T01:19:26,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-12T01:19:26,332 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:26,332 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:26,334 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6bf52e50c78a758898cb8b5b8d262309 in 202 msec 2024-12-12T01:19:26,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742157_1333 (size=125) 2024-12-12T01:19:26,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742157_1333 (size=125) 2024-12-12T01:19:26,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742157_1333 (size=125) 2024-12-12T01:19:26,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 
2024-12-12T01:19:26,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-12T01:19:26,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-12T01:19:26,343 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:26,343 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:26,345 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=142, resume processing ppid=140 2024-12-12T01:19:26,345 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 27b918ec566d8a122c845b476f66eb50 in 212 msec 2024-12-12T01:19:26,345 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:19:26,346 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:19:26,346 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:19:26,346 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:26,347 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:26,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742159_1335 (size=675) 2024-12-12T01:19:26,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742159_1335 (size=675) 2024-12-12T01:19:26,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742159_1335 (size=675) 2024-12-12T01:19:26,360 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:19:26,364 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:19:26,365 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:26,366 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:19:26,366 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-12T01:19:26,367 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 261 msec 2024-12-12T01:19:26,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-12T01:19:26,425 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-12T01:19:26,427 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T01:19:26,427 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T01:19:26,428 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T01:19:26,429 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40962, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T01:19:26,429 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45470, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T01:19:26,429 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38174, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T01:19:26,431 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:19:26,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:26,433 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:19:26,433 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:26,434 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-12-12T01:19:26,434 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:19:26,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-12T01:19:26,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742160_1336 (size=399) 2024-12-12T01:19:26,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742160_1336 (size=399) 2024-12-12T01:19:26,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742160_1336 (size=399) 2024-12-12T01:19:26,447 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cd5ddb0f1e06f57c3e0d77ec739d0a05, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:19:26,448 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 
ddca7b29418e8e04988981b41fe0793e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:19:26,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742162_1338 (size=85) 2024-12-12T01:19:26,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742162_1338 (size=85) 2024-12-12T01:19:26,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742161_1337 (size=85) 2024-12-12T01:19:26,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742161_1337 (size=85) 2024-12-12T01:19:26,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742162_1338 (size=85) 2024-12-12T01:19:26,460 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:26,460 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing cd5ddb0f1e06f57c3e0d77ec739d0a05, disabling compactions & flushes 2024-12-12T01:19:26,460 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. 2024-12-12T01:19:26,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742161_1337 (size=85) 2024-12-12T01:19:26,460 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. 2024-12-12T01:19:26,461 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. after waiting 0 ms 2024-12-12T01:19:26,461 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. 
2024-12-12T01:19:26,461 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. 2024-12-12T01:19:26,461 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for cd5ddb0f1e06f57c3e0d77ec739d0a05: Waiting for close lock at 1733966366460Disabling compacts and flushes for region at 1733966366460Disabling writes for close at 1733966366461 (+1 ms)Writing region close event to WAL at 1733966366461Closed at 1733966366461 2024-12-12T01:19:26,461 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:26,461 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing ddca7b29418e8e04988981b41fe0793e, disabling compactions & flushes 2024-12-12T01:19:26,461 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. 2024-12-12T01:19:26,461 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. 2024-12-12T01:19:26,461 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. after waiting 0 ms 2024-12-12T01:19:26,461 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. 2024-12-12T01:19:26,461 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. 
2024-12-12T01:19:26,461 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for ddca7b29418e8e04988981b41fe0793e: Waiting for close lock at 1733966366461Disabling compacts and flushes for region at 1733966366461Disabling writes for close at 1733966366461Writing region close event to WAL at 1733966366461Closed at 1733966366461 2024-12-12T01:19:26,463 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:19:26,463 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733966366463"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966366463"}]},"ts":"1733966366463"} 2024-12-12T01:19:26,463 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733966366463"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966366463"}]},"ts":"1733966366463"} 2024-12-12T01:19:26,466 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-12T01:19:26,467 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:19:26,467 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966366467"}]},"ts":"1733966366467"} 2024-12-12T01:19:26,469 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-12T01:19:26,469 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:19:26,470 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:19:26,470 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:19:26,470 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:19:26,470 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:19:26,470 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:19:26,470 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:19:26,470 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:19:26,470 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:19:26,470 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:19:26,470 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:19:26,471 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=cd5ddb0f1e06f57c3e0d77ec739d0a05, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddca7b29418e8e04988981b41fe0793e, ASSIGN}] 2024-12-12T01:19:26,472 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddca7b29418e8e04988981b41fe0793e, ASSIGN 2024-12-12T01:19:26,472 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=cd5ddb0f1e06f57c3e0d77ec739d0a05, ASSIGN 2024-12-12T01:19:26,472 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddca7b29418e8e04988981b41fe0793e, ASSIGN; state=OFFLINE, location=36236b0d7605,44743,1733966101408; forceNewPlan=false, retain=false 2024-12-12T01:19:26,472 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=cd5ddb0f1e06f57c3e0d77ec739d0a05, ASSIGN; state=OFFLINE, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:19:26,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-12T01:19:26,623 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-12T01:19:26,623 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=cd5ddb0f1e06f57c3e0d77ec739d0a05, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:19:26,623 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=ddca7b29418e8e04988981b41fe0793e, regionState=OPENING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:19:26,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddca7b29418e8e04988981b41fe0793e, ASSIGN because future has completed 2024-12-12T01:19:26,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure ddca7b29418e8e04988981b41fe0793e, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:19:26,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=cd5ddb0f1e06f57c3e0d77ec739d0a05, ASSIGN because future has completed 2024-12-12T01:19:26,627 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd5ddb0f1e06f57c3e0d77ec739d0a05, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:19:26,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-12T01:19:26,781 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. 2024-12-12T01:19:26,781 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => ddca7b29418e8e04988981b41fe0793e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e.', STARTKEY => '2', ENDKEY => ''} 2024-12-12T01:19:26,781 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. service=AccessControlService 2024-12-12T01:19:26,782 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:19:26,782 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:26,782 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:26,782 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:26,782 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:26,782 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. 2024-12-12T01:19:26,783 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => cd5ddb0f1e06f57c3e0d77ec739d0a05, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05.', STARTKEY => '', ENDKEY => '2'} 2024-12-12T01:19:26,783 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. service=AccessControlService 2024-12-12T01:19:26,783 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:19:26,783 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:26,783 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:26,783 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:26,783 INFO [StoreOpener-ddca7b29418e8e04988981b41fe0793e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:26,783 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:26,785 INFO [StoreOpener-cd5ddb0f1e06f57c3e0d77ec739d0a05-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:26,785 INFO [StoreOpener-ddca7b29418e8e04988981b41fe0793e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ddca7b29418e8e04988981b41fe0793e columnFamilyName cf 2024-12-12T01:19:26,785 DEBUG [StoreOpener-ddca7b29418e8e04988981b41fe0793e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:26,786 INFO [StoreOpener-ddca7b29418e8e04988981b41fe0793e-1 {}] regionserver.HStore(327): Store=ddca7b29418e8e04988981b41fe0793e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:19:26,786 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:26,786 INFO [StoreOpener-cd5ddb0f1e06f57c3e0d77ec739d0a05-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd5ddb0f1e06f57c3e0d77ec739d0a05 columnFamilyName cf 2024-12-12T01:19:26,786 DEBUG [StoreOpener-cd5ddb0f1e06f57c3e0d77ec739d0a05-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:26,787 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:26,787 INFO [StoreOpener-cd5ddb0f1e06f57c3e0d77ec739d0a05-1 {}] regionserver.HStore(327): Store=cd5ddb0f1e06f57c3e0d77ec739d0a05/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:19:26,787 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:26,787 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:26,788 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:26,788 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:26,788 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:26,788 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:26,789 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:26,789 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for 
cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:26,790 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:26,790 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:26,792 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:19:26,793 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened ddca7b29418e8e04988981b41fe0793e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72031675, jitterRate=0.07335560023784637}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:19:26,793 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:26,793 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for ddca7b29418e8e04988981b41fe0793e: Running coprocessor pre-open hook at 1733966366782Writing region info on filesystem at 1733966366782Initializing all the Stores at 1733966366783 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966366783Cleaning up temporary data from old regions at 1733966366788 (+5 ms)Running coprocessor post-open hooks at 1733966366793 (+5 ms)Region opened successfully at 1733966366793 2024-12-12T01:19:26,794 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e., pid=146, masterSystemTime=1733966366778 2024-12-12T01:19:26,796 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:19:26,796 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened cd5ddb0f1e06f57c3e0d77ec739d0a05; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72998810, jitterRate=0.08776703476905823}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:19:26,796 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 
{event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:26,796 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for cd5ddb0f1e06f57c3e0d77ec739d0a05: Running coprocessor pre-open hook at 1733966366784Writing region info on filesystem at 1733966366784Initializing all the Stores at 1733966366784Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966366785 (+1 ms)Cleaning up temporary data from old regions at 1733966366789 (+4 ms)Running coprocessor post-open hooks at 1733966366796 (+7 ms)Region opened successfully at 1733966366796 2024-12-12T01:19:26,796 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. 2024-12-12T01:19:26,796 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. 2024-12-12T01:19:26,797 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05., pid=147, masterSystemTime=1733966366780 2024-12-12T01:19:26,797 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=ddca7b29418e8e04988981b41fe0793e, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:19:26,799 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. 2024-12-12T01:19:26,799 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. 
2024-12-12T01:19:26,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure ddca7b29418e8e04988981b41fe0793e, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:19:26,799 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=cd5ddb0f1e06f57c3e0d77ec739d0a05, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:19:26,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd5ddb0f1e06f57c3e0d77ec739d0a05, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:19:26,802 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=145 2024-12-12T01:19:26,802 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure ddca7b29418e8e04988981b41fe0793e, server=36236b0d7605,44743,1733966101408 in 174 msec 2024-12-12T01:19:26,803 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddca7b29418e8e04988981b41fe0793e, ASSIGN in 331 msec 2024-12-12T01:19:26,805 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=144 2024-12-12T01:19:26,805 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure cd5ddb0f1e06f57c3e0d77ec739d0a05, server=36236b0d7605,38295,1733966101221 in 176 msec 2024-12-12T01:19:26,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=144, resume processing ppid=143 2024-12-12T01:19:26,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=cd5ddb0f1e06f57c3e0d77ec739d0a05, ASSIGN in 334 msec 2024-12-12T01:19:26,807 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:19:26,808 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966366807"}]},"ts":"1733966366807"} 2024-12-12T01:19:26,809 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-12T01:19:26,810 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:19:26,810 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-12T01:19:26,813 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-12T01:19:26,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:26,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:26,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:26,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:26,818 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:26,818 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:26,818 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:26,818 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:26,818 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:26,818 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:26,818 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:26,818 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:26,820 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 386 msec 2024-12-12T01:19:27,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-12T01:19:27,065 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-12T01:19:27,068 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:19:27,073 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:19:27,075 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-12T01:19:27,092 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [cd5ddb0f1e06f57c3e0d77ec739d0a05, ddca7b29418e8e04988981b41fe0793e] 2024-12-12T01:19:27,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[cd5ddb0f1e06f57c3e0d77ec739d0a05, ddca7b29418e8e04988981b41fe0793e], force=true 2024-12-12T01:19:27,099 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[cd5ddb0f1e06f57c3e0d77ec739d0a05, ddca7b29418e8e04988981b41fe0793e], force=true 2024-12-12T01:19:27,099 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[cd5ddb0f1e06f57c3e0d77ec739d0a05, ddca7b29418e8e04988981b41fe0793e], force=true 2024-12-12T01:19:27,099 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[cd5ddb0f1e06f57c3e0d77ec739d0a05, ddca7b29418e8e04988981b41fe0793e], 
force=true 2024-12-12T01:19:27,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-12T01:19:27,107 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=cd5ddb0f1e06f57c3e0d77ec739d0a05, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddca7b29418e8e04988981b41fe0793e, UNASSIGN}] 2024-12-12T01:19:27,108 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=cd5ddb0f1e06f57c3e0d77ec739d0a05, UNASSIGN 2024-12-12T01:19:27,108 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddca7b29418e8e04988981b41fe0793e, UNASSIGN 2024-12-12T01:19:27,109 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=cd5ddb0f1e06f57c3e0d77ec739d0a05, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:19:27,109 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=ddca7b29418e8e04988981b41fe0793e, regionState=CLOSING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:19:27,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=cd5ddb0f1e06f57c3e0d77ec739d0a05, UNASSIGN because future has completed 2024-12-12T01:19:27,111 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-12T01:19:27,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure cd5ddb0f1e06f57c3e0d77ec739d0a05, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:19:27,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddca7b29418e8e04988981b41fe0793e, UNASSIGN because future has completed 2024-12-12T01:19:27,112 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-12T01:19:27,112 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure ddca7b29418e8e04988981b41fe0793e, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:19:27,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done 
pid=148 2024-12-12T01:19:27,264 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:27,264 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-12T01:19:27,264 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing cd5ddb0f1e06f57c3e0d77ec739d0a05, disabling compactions & flushes 2024-12-12T01:19:27,264 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. 2024-12-12T01:19:27,264 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. 2024-12-12T01:19:27,264 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. after waiting 0 ms 2024-12-12T01:19:27,265 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. 2024-12-12T01:19:27,265 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing cd5ddb0f1e06f57c3e0d77ec739d0a05 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-12T01:19:27,265 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:27,265 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-12T01:19:27,265 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing ddca7b29418e8e04988981b41fe0793e, disabling compactions & flushes 2024-12-12T01:19:27,265 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. 2024-12-12T01:19:27,265 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. 2024-12-12T01:19:27,265 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. 
after waiting 0 ms 2024-12-12T01:19:27,265 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. 2024-12-12T01:19:27,265 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing ddca7b29418e8e04988981b41fe0793e 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-12T01:19:27,282 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/.tmp/cf/401967155f804a688b0cb0fc2ca3159d is 28, key is 1/cf:/1733966367069/Put/seqid=0 2024-12-12T01:19:27,288 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/.tmp/cf/6449ecaf49384d9fa8835dbbfeaa2817 is 28, key is 2/cf:/1733966367074/Put/seqid=0 2024-12-12T01:19:27,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742163_1339 (size=4945) 2024-12-12T01:19:27,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742163_1339 (size=4945) 2024-12-12T01:19:27,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742163_1339 (size=4945) 2024-12-12T01:19:27,291 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/.tmp/cf/401967155f804a688b0cb0fc2ca3159d 2024-12-12T01:19:27,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742164_1340 (size=4945) 2024-12-12T01:19:27,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742164_1340 (size=4945) 2024-12-12T01:19:27,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742164_1340 (size=4945) 2024-12-12T01:19:27,295 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/.tmp/cf/6449ecaf49384d9fa8835dbbfeaa2817 2024-12-12T01:19:27,297 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/.tmp/cf/401967155f804a688b0cb0fc2ca3159d as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/cf/401967155f804a688b0cb0fc2ca3159d 2024-12-12T01:19:27,301 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/.tmp/cf/6449ecaf49384d9fa8835dbbfeaa2817 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/cf/6449ecaf49384d9fa8835dbbfeaa2817 2024-12-12T01:19:27,302 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/cf/401967155f804a688b0cb0fc2ca3159d, entries=1, sequenceid=5, filesize=4.8 K 2024-12-12T01:19:27,303 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for cd5ddb0f1e06f57c3e0d77ec739d0a05 in 38ms, sequenceid=5, compaction requested=false 2024-12-12T01:19:27,303 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-12T01:19:27,307 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/cf/6449ecaf49384d9fa8835dbbfeaa2817, entries=1, sequenceid=5, filesize=4.8 K 2024-12-12T01:19:27,307 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for ddca7b29418e8e04988981b41fe0793e in 42ms, sequenceid=5, compaction requested=false 2024-12-12T01:19:27,309 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T01:19:27,309 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:19:27,309 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed 
testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. 2024-12-12T01:19:27,310 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for cd5ddb0f1e06f57c3e0d77ec739d0a05: Waiting for close lock at 1733966367264Running coprocessor pre-close hooks at 1733966367264Disabling compacts and flushes for region at 1733966367264Disabling writes for close at 1733966367264Obtaining lock to block concurrent updates at 1733966367265 (+1 ms)Preparing flush snapshotting stores in cd5ddb0f1e06f57c3e0d77ec739d0a05 at 1733966367265Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733966367265Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05. at 1733966367266 (+1 ms)Flushing cd5ddb0f1e06f57c3e0d77ec739d0a05/cf: creating writer at 1733966367266Flushing cd5ddb0f1e06f57c3e0d77ec739d0a05/cf: appending metadata at 1733966367281 (+15 ms)Flushing cd5ddb0f1e06f57c3e0d77ec739d0a05/cf: closing flushed file at 1733966367282 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fc2f3d4: reopening flushed file at 1733966367297 (+15 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for cd5ddb0f1e06f57c3e0d77ec739d0a05 in 38ms, sequenceid=5, compaction requested=false at 1733966367303 (+6 ms)Writing region close event to WAL at 1733966367305 (+2 ms)Running coprocessor post-close hooks at 1733966367309 (+4 ms)Closed at 1733966367309 2024-12-12T01:19:27,312 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:27,312 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T01:19:27,312 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:19:27,312 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. 
2024-12-12T01:19:27,313 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for ddca7b29418e8e04988981b41fe0793e: Waiting for close lock at 1733966367265Running coprocessor pre-close hooks at 1733966367265Disabling compacts and flushes for region at 1733966367265Disabling writes for close at 1733966367265Obtaining lock to block concurrent updates at 1733966367265Preparing flush snapshotting stores in ddca7b29418e8e04988981b41fe0793e at 1733966367265Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733966367266 (+1 ms)Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e. at 1733966367266Flushing ddca7b29418e8e04988981b41fe0793e/cf: creating writer at 1733966367266Flushing ddca7b29418e8e04988981b41fe0793e/cf: appending metadata at 1733966367287 (+21 ms)Flushing ddca7b29418e8e04988981b41fe0793e/cf: closing flushed file at 1733966367287Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@218775b2: reopening flushed file at 1733966367300 (+13 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for ddca7b29418e8e04988981b41fe0793e in 42ms, sequenceid=5, compaction requested=false at 1733966367307 (+7 ms)Writing region close event to WAL at 1733966367308 (+1 ms)Running coprocessor post-close hooks at 1733966367312 (+4 ms)Closed at 1733966367312 2024-12-12T01:19:27,313 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=cd5ddb0f1e06f57c3e0d77ec739d0a05, regionState=CLOSED 2024-12-12T01:19:27,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure cd5ddb0f1e06f57c3e0d77ec739d0a05, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:19:27,316 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:27,316 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=ddca7b29418e8e04988981b41fe0793e, regionState=CLOSED 2024-12-12T01:19:27,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure ddca7b29418e8e04988981b41fe0793e, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:19:27,321 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=149 2024-12-12T01:19:27,321 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure cd5ddb0f1e06f57c3e0d77ec739d0a05, server=36236b0d7605,38295,1733966101221 in 206 msec 2024-12-12T01:19:27,323 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=cd5ddb0f1e06f57c3e0d77ec739d0a05, UNASSIGN in 214 msec 2024-12-12T01:19:27,324 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=150 2024-12-12T01:19:27,324 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure ddca7b29418e8e04988981b41fe0793e, server=36236b0d7605,44743,1733966101408 in 210 msec 2024-12-12T01:19:27,327 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=150, resume processing ppid=148 2024-12-12T01:19:27,327 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=ddca7b29418e8e04988981b41fe0793e, UNASSIGN in 217 msec 2024-12-12T01:19:27,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742165_1341 (size=84) 2024-12-12T01:19:27,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742165_1341 (size=84) 2024-12-12T01:19:27,348 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:27,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742165_1341 (size=84) 2024-12-12T01:19:27,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742166_1342 (size=20) 2024-12-12T01:19:27,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742166_1342 (size=20) 2024-12-12T01:19:27,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742166_1342 (size=20) 2024-12-12T01:19:27,377 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:27,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742167_1343 (size=21) 2024-12-12T01:19:27,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742167_1343 (size=21) 2024-12-12T01:19:27,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742167_1343 (size=21) 2024-12-12T01:19:27,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742168_1344 (size=84) 2024-12-12T01:19:27,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742168_1344 (size=84) 2024-12-12T01:19:27,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742168_1344 (size=84) 2024-12-12T01:19:27,409 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:27,414 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-12T01:19:27,417 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-12T01:19:27,420 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366431.cd5ddb0f1e06f57c3e0d77ec739d0a05.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-12T01:19:27,420 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733966366431.ddca7b29418e8e04988981b41fe0793e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-12T01:19:27,421 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-12T01:19:27,425 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fd36ddb471cee7fb43bda32dd22b1fd4, ASSIGN}] 2024-12-12T01:19:27,427 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fd36ddb471cee7fb43bda32dd22b1fd4, ASSIGN 2024-12-12T01:19:27,428 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fd36ddb471cee7fb43bda32dd22b1fd4, ASSIGN; state=MERGED, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:19:27,578 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-12T01:19:27,579 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=fd36ddb471cee7fb43bda32dd22b1fd4, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:19:27,581 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fd36ddb471cee7fb43bda32dd22b1fd4, ASSIGN because future has completed 2024-12-12T01:19:27,581 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure fd36ddb471cee7fb43bda32dd22b1fd4, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:19:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-12T01:19:27,736 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4. 2024-12-12T01:19:27,736 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => fd36ddb471cee7fb43bda32dd22b1fd4, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4.', STARTKEY => '', ENDKEY => ''} 2024-12-12T01:19:27,736 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4. service=AccessControlService 2024-12-12T01:19:27,737 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:19:27,737 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:27,737 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:27,737 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:27,737 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:27,739 INFO [StoreOpener-fd36ddb471cee7fb43bda32dd22b1fd4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:27,740 INFO [StoreOpener-fd36ddb471cee7fb43bda32dd22b1fd4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fd36ddb471cee7fb43bda32dd22b1fd4 columnFamilyName cf 2024-12-12T01:19:27,740 DEBUG [StoreOpener-fd36ddb471cee7fb43bda32dd22b1fd4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:27,749 DEBUG [StoreOpener-fd36ddb471cee7fb43bda32dd22b1fd4-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/cf/401967155f804a688b0cb0fc2ca3159d.cd5ddb0f1e06f57c3e0d77ec739d0a05->hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/cf/401967155f804a688b0cb0fc2ca3159d-top 2024-12-12T01:19:27,754 DEBUG [StoreOpener-fd36ddb471cee7fb43bda32dd22b1fd4-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/cf/6449ecaf49384d9fa8835dbbfeaa2817.ddca7b29418e8e04988981b41fe0793e->hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/cf/6449ecaf49384d9fa8835dbbfeaa2817-top 2024-12-12T01:19:27,755 INFO [StoreOpener-fd36ddb471cee7fb43bda32dd22b1fd4-1 {}] regionserver.HStore(327): Store=fd36ddb471cee7fb43bda32dd22b1fd4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:19:27,755 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:27,756 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:27,757 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:27,757 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:27,757 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:27,759 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:27,760 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened fd36ddb471cee7fb43bda32dd22b1fd4; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63536403, jitterRate=-0.05323381721973419}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:19:27,760 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:27,761 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for fd36ddb471cee7fb43bda32dd22b1fd4: Running coprocessor pre-open hook at 1733966367737Writing region info on filesystem at 1733966367737Initializing all the Stores at 1733966367738 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966367738Cleaning up temporary data from old regions at 1733966367757 (+19 ms)Running coprocessor post-open hooks at 1733966367760 (+3 ms)Region opened successfully at 1733966367761 (+1 ms) 2024-12-12T01:19:27,761 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4., pid=154, masterSystemTime=1733966367732 2024-12-12T01:19:27,762 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4.,because compaction is disabled. 2024-12-12T01:19:27,764 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4. 2024-12-12T01:19:27,764 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4. 2024-12-12T01:19:27,764 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=fd36ddb471cee7fb43bda32dd22b1fd4, regionState=OPEN, openSeqNum=9, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:19:27,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure fd36ddb471cee7fb43bda32dd22b1fd4, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:19:27,770 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-12T01:19:27,770 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure fd36ddb471cee7fb43bda32dd22b1fd4, server=36236b0d7605,38295,1733966101221 in 186 msec 2024-12-12T01:19:27,772 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-12-12T01:19:27,772 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fd36ddb471cee7fb43bda32dd22b1fd4, ASSIGN in 345 msec 2024-12-12T01:19:27,775 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[cd5ddb0f1e06f57c3e0d77ec739d0a05, ddca7b29418e8e04988981b41fe0793e], force=true in 678 msec 2024-12-12T01:19:28,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-12T01:19:28,239 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-12T01:19:28,240 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-12T01:19:28,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966368240 (current time:1733966368240). 2024-12-12T01:19:28,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:19:28,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-12T01:19:28,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:19:28,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40c0713f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:28,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:19:28,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:19:28,244 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:19:28,244 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:19:28,244 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:19:28,244 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@391aff73, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:28,244 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:19:28,245 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:19:28,245 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:28,246 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55368, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:19:28,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61422e4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:28,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:19:28,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:19:28,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:19:28,250 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40978, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:19:28,252 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:19:28,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:19:28,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:28,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:28,254 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:19:28,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f22fad2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:28,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:19:28,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:19:28,257 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:19:28,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:19:28,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:19:28,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54b80686, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:28,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:19:28,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:19:28,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:28,260 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55398, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:19:28,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43dcfe98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:28,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:19:28,264 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:19:28,264 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:19:28,266 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40980, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-12T01:19:28,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:19:28,271 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:19:28,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:19:28,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:28,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:28,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-12T01:19:28,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:19:28,273 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:19:28,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-12T01:19:28,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-12T01:19:28,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-12T01:19:28,276 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:19:28,278 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:19:28,281 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:19:28,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742169_1345 (size=216) 2024-12-12T01:19:28,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742169_1345 (size=216) 2024-12-12T01:19:28,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742169_1345 (size=216) 2024-12-12T01:19:28,305 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:19:28,305 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
fd36ddb471cee7fb43bda32dd22b1fd4}] 2024-12-12T01:19:28,307 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:28,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-12T01:19:28,391 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0006_000001 (auth:SIMPLE) from 127.0.0.1:55826 2024-12-12T01:19:28,408 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0006/container_1733966109028_0006_01_000001/launch_container.sh] 2024-12-12T01:19:28,408 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0006/container_1733966109028_0006_01_000001/container_tokens] 2024-12-12T01:19:28,408 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0006/container_1733966109028_0006_01_000001/sysfs] 2024-12-12T01:19:28,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-12-12T01:19:28,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4. 2024-12-12T01:19:28,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for fd36ddb471cee7fb43bda32dd22b1fd4: 2024-12-12T01:19:28,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-12T01:19:28,461 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:28,461 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:19:28,461 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/cf/401967155f804a688b0cb0fc2ca3159d.cd5ddb0f1e06f57c3e0d77ec739d0a05->hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/cf/401967155f804a688b0cb0fc2ca3159d-top, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/cf/6449ecaf49384d9fa8835dbbfeaa2817.ddca7b29418e8e04988981b41fe0793e->hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/cf/6449ecaf49384d9fa8835dbbfeaa2817-top] hfiles 2024-12-12T01:19:28,461 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/cf/401967155f804a688b0cb0fc2ca3159d.cd5ddb0f1e06f57c3e0d77ec739d0a05 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:28,462 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/cf/6449ecaf49384d9fa8835dbbfeaa2817.ddca7b29418e8e04988981b41fe0793e for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:28,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742170_1346 (size=269) 2024-12-12T01:19:28,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742170_1346 (size=269) 2024-12-12T01:19:28,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742170_1346 (size=269) 2024-12-12T01:19:28,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4. 
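The SnapshotProcedure traced above (pid=155, type=FLUSH) is the server-side half of an ordinary client snapshot request. As a minimal sketch of the client call that would drive it — snapshot and table names taken from this log, connection setup and the class name purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Assumes the client configuration points at the cluster that produced this log.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Names taken from the log; on an enabled table this requests a flush-type
      // snapshot, which is what the SnapshotProcedure above records.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
    }
  }
}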
2024-12-12T01:19:28,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-12T01:19:28,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-12-12T01:19:28,480 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:28,480 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:28,483 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-12-12T01:19:28,483 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fd36ddb471cee7fb43bda32dd22b1fd4 in 176 msec 2024-12-12T01:19:28,483 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:19:28,485 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:19:28,486 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:19:28,486 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:28,487 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:28,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742171_1347 (size=670) 2024-12-12T01:19:28,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742171_1347 (size=670) 2024-12-12T01:19:28,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742171_1347 (size=670) 2024-12-12T01:19:28,542 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:19:28,553 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:19:28,554 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:28,556 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:19:28,556 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-12T01:19:28,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 283 msec 2024-12-12T01:19:28,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-12T01:19:28,596 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-12T01:19:28,596 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966368596 2024-12-12T01:19:28,597 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33353, tgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966368596, rawTgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966368596, srcFsUri=hdfs://localhost:33353, srcDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:19:28,643 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33353, inputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:19:28,643 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966368596, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966368596/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:28,647 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-12T01:19:28,657 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966368596/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:28,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742172_1348 (size=216) 2024-12-12T01:19:28,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742173_1349 (size=670) 2024-12-12T01:19:28,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742172_1348 (size=216) 2024-12-12T01:19:28,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742173_1349 (size=670) 2024-12-12T01:19:28,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742172_1348 (size=216) 2024-12-12T01:19:28,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742173_1349 (size=670) 2024-12-12T01:19:28,698 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:28,699 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:28,699 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:28,910 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
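The ExportSnapshot run that follows (manifest copy, dependency-jar staging, then the MapReduce job) is normally driven as a Hadoop Tool, e.g. `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>`. A minimal sketch, assuming ExportSnapshot is invoked through ToolRunner; the destination URI and class name are illustrative, not the test's generated export path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // -snapshot and -copy-to are the standard ExportSnapshot options; the test above
    // instead targets its own export-<timestamp> directory on the mini cluster.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://namenode:8020/backup"   // illustrative destination
    });
    System.exit(rc);
  }
}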
2024-12-12T01:19:29,569 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:19:30,021 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-1615046504547917380.jar 2024-12-12T01:19:30,022 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:30,022 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:30,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-6052325711957211678.jar 2024-12-12T01:19:30,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:30,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:30,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:30,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:30,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:30,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:19:30,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T01:19:30,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T01:19:30,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T01:19:30,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T01:19:30,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T01:19:30,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T01:19:30,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T01:19:30,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T01:19:30,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T01:19:30,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T01:19:30,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T01:19:30,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:19:30,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-12T01:19:30,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:19:30,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:19:30,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:19:30,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:19:30,153 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:19:30,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742174_1350 (size=131440) 2024-12-12T01:19:30,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742174_1350 (size=131440) 2024-12-12T01:19:30,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742174_1350 (size=131440) 2024-12-12T01:19:30,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742175_1351 (size=4188619) 2024-12-12T01:19:30,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742175_1351 (size=4188619) 2024-12-12T01:19:30,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742175_1351 (size=4188619) 2024-12-12T01:19:30,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742176_1352 (size=1323991) 2024-12-12T01:19:30,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742176_1352 (size=1323991) 2024-12-12T01:19:30,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742176_1352 (size=1323991) 2024-12-12T01:19:30,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742177_1353 (size=903541) 2024-12-12T01:19:30,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742177_1353 (size=903541) 
2024-12-12T01:19:30,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742177_1353 (size=903541) 2024-12-12T01:19:30,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742178_1354 (size=8360360) 2024-12-12T01:19:30,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742178_1354 (size=8360360) 2024-12-12T01:19:30,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742178_1354 (size=8360360) 2024-12-12T01:19:30,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742179_1355 (size=1877034) 2024-12-12T01:19:30,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742179_1355 (size=1877034) 2024-12-12T01:19:30,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742179_1355 (size=1877034) 2024-12-12T01:19:30,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742180_1356 (size=77835) 2024-12-12T01:19:30,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742180_1356 (size=77835) 2024-12-12T01:19:30,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742180_1356 (size=77835) 2024-12-12T01:19:30,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742181_1357 (size=30949) 2024-12-12T01:19:30,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742181_1357 (size=30949) 2024-12-12T01:19:30,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742181_1357 (size=30949) 2024-12-12T01:19:30,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742182_1358 (size=1597213) 2024-12-12T01:19:30,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742182_1358 (size=1597213) 2024-12-12T01:19:30,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742182_1358 (size=1597213) 2024-12-12T01:19:30,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742183_1359 (size=6426224) 2024-12-12T01:19:30,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742183_1359 (size=6426224) 2024-12-12T01:19:30,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742183_1359 (size=6426224) 2024-12-12T01:19:30,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742184_1360 
(size=4695811) 2024-12-12T01:19:30,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742184_1360 (size=4695811) 2024-12-12T01:19:30,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742184_1360 (size=4695811) 2024-12-12T01:19:30,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:30,712 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-12T01:19:30,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:30,713 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-12T01:19:30,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-12T01:19:30,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742185_1361 (size=232957) 2024-12-12T01:19:30,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742185_1361 (size=232957) 2024-12-12T01:19:30,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742185_1361 (size=232957) 2024-12-12T01:19:30,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742186_1362 (size=127628) 2024-12-12T01:19:30,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742186_1362 (size=127628) 2024-12-12T01:19:30,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742186_1362 (size=127628) 2024-12-12T01:19:30,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742187_1363 (size=20406) 2024-12-12T01:19:30,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742187_1363 (size=20406) 2024-12-12T01:19:30,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742187_1363 (size=20406) 2024-12-12T01:19:31,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742188_1364 (size=5175431) 2024-12-12T01:19:31,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742188_1364 (size=5175431) 
2024-12-12T01:19:31,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742188_1364 (size=5175431) 2024-12-12T01:19:31,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742189_1365 (size=217634) 2024-12-12T01:19:31,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742189_1365 (size=217634) 2024-12-12T01:19:31,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742189_1365 (size=217634) 2024-12-12T01:19:31,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742190_1366 (size=1832290) 2024-12-12T01:19:31,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742190_1366 (size=1832290) 2024-12-12T01:19:31,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742190_1366 (size=1832290) 2024-12-12T01:19:31,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742191_1367 (size=322274) 2024-12-12T01:19:31,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742191_1367 (size=322274) 2024-12-12T01:19:31,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742191_1367 (size=322274) 2024-12-12T01:19:31,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742192_1368 (size=503880) 2024-12-12T01:19:31,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742192_1368 (size=503880) 2024-12-12T01:19:31,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742192_1368 (size=503880) 2024-12-12T01:19:31,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742193_1369 (size=443172) 2024-12-12T01:19:31,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742193_1369 (size=443172) 2024-12-12T01:19:31,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742193_1369 (size=443172) 2024-12-12T01:19:31,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742194_1370 (size=29229) 2024-12-12T01:19:31,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742194_1370 (size=29229) 2024-12-12T01:19:31,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742194_1370 (size=29229) 2024-12-12T01:19:31,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742195_1371 
(size=24096) 2024-12-12T01:19:31,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742195_1371 (size=24096) 2024-12-12T01:19:31,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742195_1371 (size=24096) 2024-12-12T01:19:31,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742196_1372 (size=111872) 2024-12-12T01:19:31,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742196_1372 (size=111872) 2024-12-12T01:19:31,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742196_1372 (size=111872) 2024-12-12T01:19:31,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742197_1373 (size=45609) 2024-12-12T01:19:31,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742197_1373 (size=45609) 2024-12-12T01:19:31,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742197_1373 (size=45609) 2024-12-12T01:19:31,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742198_1374 (size=136454) 2024-12-12T01:19:31,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742198_1374 (size=136454) 2024-12-12T01:19:31,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742198_1374 (size=136454) 2024-12-12T01:19:31,258 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
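The JobResourceUploader warning above ("No job jar file set") is the standard MapReduce hint that the submitted job has no jar attached; the message itself points at Job or Job#setJar(String). In ordinary (non-test) job setup it is avoided by attaching a jar, for example via Job#setJarByClass. A minimal sketch with an illustrative job name and class name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "export-snapshot-example"); // illustrative job name
    // Ships the jar containing this class with the job so user classes are found on the
    // cluster; Job#setJar(String) is the explicit-path alternative named in the warning.
    job.setJarByClass(JobJarSketch.class);
  }
}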
2024-12-12T01:19:31,261 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-12T01:19:31,265 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-12T01:19:31,265 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-12T01:19:31,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742199_1375 (size=481) 2024-12-12T01:19:31,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742199_1375 (size=481) 2024-12-12T01:19:31,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742199_1375 (size=481) 2024-12-12T01:19:31,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742200_1376 (size=21) 2024-12-12T01:19:31,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742200_1376 (size=21) 2024-12-12T01:19:31,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742200_1376 (size=21) 2024-12-12T01:19:31,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742201_1377 (size=304057) 2024-12-12T01:19:31,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742201_1377 (size=304057) 2024-12-12T01:19:31,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742201_1377 (size=304057) 2024-12-12T01:19:31,369 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:19:31,369 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T01:19:31,398 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0007_000001 (auth:SIMPLE) from 127.0.0.1:55828 2024-12-12T01:19:36,216 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:19:38,161 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0007_000001 (auth:SIMPLE) from 127.0.0.1:49086 2024-12-12T01:19:38,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742202_1378 (size=349755) 2024-12-12T01:19:38,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742202_1378 (size=349755) 2024-12-12T01:19:38,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742202_1378 (size=349755) 2024-12-12T01:19:40,531 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0007_000001 (auth:SIMPLE) from 127.0.0.1:48392 2024-12-12T01:19:40,531 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0007_000001 (auth:SIMPLE) from 127.0.0.1:48886 2024-12-12T01:19:44,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742203_1379 (size=4945) 2024-12-12T01:19:44,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742203_1379 (size=4945) 2024-12-12T01:19:44,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742203_1379 (size=4945) 2024-12-12T01:19:44,577 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_3/usercache/jenkins/appcache/application_1733966109028_0007/container_1733966109028_0007_01_000002/launch_container.sh] 2024-12-12T01:19:44,578 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_3/usercache/jenkins/appcache/application_1733966109028_0007/container_1733966109028_0007_01_000002/container_tokens] 2024-12-12T01:19:44,578 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_3/usercache/jenkins/appcache/application_1733966109028_0007/container_1733966109028_0007_01_000002/sysfs] 2024-12-12T01:19:45,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742205_1381 (size=4945) 2024-12-12T01:19:45,105 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742205_1381 (size=4945) 2024-12-12T01:19:45,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742205_1381 (size=4945) 2024-12-12T01:19:45,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742204_1380 (size=22246) 2024-12-12T01:19:45,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742204_1380 (size=22246) 2024-12-12T01:19:45,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742204_1380 (size=22246) 2024-12-12T01:19:45,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742206_1382 (size=482) 2024-12-12T01:19:45,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742206_1382 (size=482) 2024-12-12T01:19:45,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742206_1382 (size=482) 2024-12-12T01:19:45,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742207_1383 (size=22246) 2024-12-12T01:19:45,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742207_1383 (size=22246) 2024-12-12T01:19:45,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742207_1383 (size=22246) 2024-12-12T01:19:45,231 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0007/container_1733966109028_0007_01_000003/launch_container.sh] 2024-12-12T01:19:45,231 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0007/container_1733966109028_0007_01_000003/container_tokens] 2024-12-12T01:19:45,231 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0007/container_1733966109028_0007_01_000003/sysfs] 2024-12-12T01:19:45,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742208_1384 (size=349755) 2024-12-12T01:19:45,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742208_1384 (size=349755) 2024-12-12T01:19:45,251 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742208_1384 (size=349755) 2024-12-12T01:19:45,267 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0007_000001 (auth:SIMPLE) from 127.0.0.1:57856 2024-12-12T01:19:45,274 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0007_000001 (auth:SIMPLE) from 127.0.0.1:44528 2024-12-12T01:19:46,558 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-12T01:19:46,559 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-12T01:19:46,566 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,566 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-12T01:19:46,567 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-12T01:19:46,567 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,567 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-12T01:19:46,567 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-12T01:19:46,567 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966368596/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966368596/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,568 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966368596/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-12T01:19:46,568 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966368596/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-12T01:19:46,573 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] 
procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-12T01:19:46,577 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966386577"}]},"ts":"1733966386577"} 2024-12-12T01:19:46,579 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-12T01:19:46,579 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-12T01:19:46,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-12T01:19:46,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fd36ddb471cee7fb43bda32dd22b1fd4, UNASSIGN}] 2024-12-12T01:19:46,582 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fd36ddb471cee7fb43bda32dd22b1fd4, UNASSIGN 2024-12-12T01:19:46,582 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=fd36ddb471cee7fb43bda32dd22b1fd4, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:19:46,584 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fd36ddb471cee7fb43bda32dd22b1fd4, UNASSIGN because future has completed 2024-12-12T01:19:46,584 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:19:46,584 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure fd36ddb471cee7fb43bda32dd22b1fd4, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:19:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-12T01:19:46,737 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:46,737 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:19:46,737 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 
{event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing fd36ddb471cee7fb43bda32dd22b1fd4, disabling compactions & flushes 2024-12-12T01:19:46,737 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4. 2024-12-12T01:19:46,737 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4. 2024-12-12T01:19:46,737 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4. after waiting 0 ms 2024-12-12T01:19:46,737 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4. 2024-12-12T01:19:46,744 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-12T01:19:46,745 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:19:46,745 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4. 
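The region close above and the DISABLE/DELETE procedures recorded around it (pid=157 onward) are the server-side effect of the usual client table teardown. A minimal sketch of the equivalent Admin calls — table name taken from the log, connection setup and class name illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A table must be disabled before it can be deleted; these two calls drive the
      // DisableTableProcedure and DeleteTableProcedure seen in the surrounding log.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      admin.deleteTable(table);
    }
  }
}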
2024-12-12T01:19:46,746 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for fd36ddb471cee7fb43bda32dd22b1fd4: Waiting for close lock at 1733966386737Running coprocessor pre-close hooks at 1733966386737Disabling compacts and flushes for region at 1733966386737Disabling writes for close at 1733966386737Writing region close event to WAL at 1733966386740 (+3 ms)Running coprocessor post-close hooks at 1733966386745 (+5 ms)Closed at 1733966386745 2024-12-12T01:19:46,747 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:46,748 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=fd36ddb471cee7fb43bda32dd22b1fd4, regionState=CLOSED 2024-12-12T01:19:46,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure fd36ddb471cee7fb43bda32dd22b1fd4, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:19:46,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-12-12T01:19:46,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure fd36ddb471cee7fb43bda32dd22b1fd4, server=36236b0d7605,38295,1733966101221 in 167 msec 2024-12-12T01:19:46,754 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-12T01:19:46,754 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fd36ddb471cee7fb43bda32dd22b1fd4, UNASSIGN in 171 msec 2024-12-12T01:19:46,755 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-12-12T01:19:46,756 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 175 msec 2024-12-12T01:19:46,757 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966386757"}]},"ts":"1733966386757"} 2024-12-12T01:19:46,758 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-12T01:19:46,759 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-12T01:19:46,760 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 185 msec 2024-12-12T01:19:46,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-12T01:19:46,894 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-12T01:19:46,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,897 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,897 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,900 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,901 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:46,901 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:46,901 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:46,903 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/recovered.edits] 2024-12-12T01:19:46,903 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/recovered.edits] 2024-12-12T01:19:46,903 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/recovered.edits] 2024-12-12T01:19:46,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,906 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-12T01:19:46,906 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-12T01:19:46,906 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-12T01:19:46,906 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-12T01:19:46,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:46,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:46,908 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:46,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:46,909 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/cf/6449ecaf49384d9fa8835dbbfeaa2817 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/cf/6449ecaf49384d9fa8835dbbfeaa2817 2024-12-12T01:19:46,909 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/cf/401967155f804a688b0cb0fc2ca3159d to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/cf/401967155f804a688b0cb0fc2ca3159d 2024-12-12T01:19:46,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-12T01:19:46,909 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:46,909 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/cf/401967155f804a688b0cb0fc2ca3159d.cd5ddb0f1e06f57c3e0d77ec739d0a05 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/cf/401967155f804a688b0cb0fc2ca3159d.cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:46,910 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:46,910 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:46,910 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:46,910 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/cf/6449ecaf49384d9fa8835dbbfeaa2817.ddca7b29418e8e04988981b41fe0793e to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/cf/6449ecaf49384d9fa8835dbbfeaa2817.ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:46,913 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/recovered.edits/8.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05/recovered.edits/8.seqid 2024-12-12T01:19:46,913 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/recovered.edits/8.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e/recovered.edits/8.seqid 2024-12-12T01:19:46,914 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/cd5ddb0f1e06f57c3e0d77ec739d0a05 2024-12-12T01:19:46,914 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/ddca7b29418e8e04988981b41fe0793e 2024-12-12T01:19:46,914 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/recovered.edits/12.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4/recovered.edits/12.seqid 2024-12-12T01:19:46,914 DEBUG [HFileArchiver-17 
{}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fd36ddb471cee7fb43bda32dd22b1fd4 2024-12-12T01:19:46,914 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-12T01:19:46,916 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,919 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-12T01:19:46,921 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-12T01:19:46,922 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,922 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-12T01:19:46,922 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966386922"}]},"ts":"9223372036854775807"} 2024-12-12T01:19:46,924 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-12T01:19:46,924 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => fd36ddb471cee7fb43bda32dd22b1fd4, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T01:19:46,924 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
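[Editor's sketch, not part of the captured log] By this point the DeleteTableProcedure (pid=161) has archived the region directories (the HFileArchiver entries move store files under .../archive/... rather than deleting them) and is removing the table from hbase:meta. A hedged sketch of the matching client-side call follows; the Admin handle, the helper name dropTable, and the table-name argument are assumptions for illustration only.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class DeleteTableSketch {
  // Hypothetical helper: drop a test table the way the teardown above does,
  // assuming an already-open Admin (see the previous sketch).
  static void dropTable(Admin admin, String name) throws IOException {
    TableName table = TableName.valueOf(name);
    if (!admin.tableExists(table)) {
      return;
    }
    if (admin.isTableEnabled(table)) {
      admin.disableTable(table); // deleteTable requires the table to be disabled first
    }
    admin.deleteTable(table);    // drives a DeleteTableProcedure; data ends up under archive/
  }
}

Note that deleteTable removes the table from hbase:meta and the live data directory, while the store files remain recoverable under the archive path until the cleaner chores discard them.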
2024-12-12T01:19:46,924 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966386924"}]},"ts":"9223372036854775807"} 2024-12-12T01:19:46,925 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-12T01:19:46,926 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:46,927 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 31 msec 2024-12-12T01:19:47,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-12T01:19:47,015 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:47,015 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-12T01:19:47,016 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-12T01:19:47,019 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966387018"}]},"ts":"1733966387018"} 2024-12-12T01:19:47,020 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-12T01:19:47,020 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-12T01:19:47,021 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-12T01:19:47,022 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=6bf52e50c78a758898cb8b5b8d262309, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=27b918ec566d8a122c845b476f66eb50, UNASSIGN}] 2024-12-12T01:19:47,023 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=27b918ec566d8a122c845b476f66eb50, UNASSIGN 2024-12-12T01:19:47,023 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=6bf52e50c78a758898cb8b5b8d262309, UNASSIGN 2024-12-12T01:19:47,023 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=27b918ec566d8a122c845b476f66eb50, regionState=CLOSING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:19:47,023 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=6bf52e50c78a758898cb8b5b8d262309, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:19:47,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=27b918ec566d8a122c845b476f66eb50, UNASSIGN because future has completed 2024-12-12T01:19:47,025 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:19:47,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 27b918ec566d8a122c845b476f66eb50, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:19:47,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=6bf52e50c78a758898cb8b5b8d262309, UNASSIGN because future has completed 2024-12-12T01:19:47,026 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:19:47,026 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6bf52e50c78a758898cb8b5b8d262309, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:19:47,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-12T01:19:47,177 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:47,177 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:19:47,177 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing 27b918ec566d8a122c845b476f66eb50, disabling compactions & flushes 2024-12-12T01:19:47,177 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): 
Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 2024-12-12T01:19:47,178 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 2024-12-12T01:19:47,178 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. after waiting 0 ms 2024-12-12T01:19:47,178 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 2024-12-12T01:19:47,179 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:47,179 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:19:47,179 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing 6bf52e50c78a758898cb8b5b8d262309, disabling compactions & flushes 2024-12-12T01:19:47,179 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 2024-12-12T01:19:47,179 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 2024-12-12T01:19:47,179 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. after waiting 0 ms 2024-12-12T01:19:47,179 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 2024-12-12T01:19:47,183 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:19:47,183 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:19:47,183 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50. 
2024-12-12T01:19:47,184 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for 27b918ec566d8a122c845b476f66eb50: Waiting for close lock at 1733966387177Running coprocessor pre-close hooks at 1733966387177Disabling compacts and flushes for region at 1733966387177Disabling writes for close at 1733966387178 (+1 ms)Writing region close event to WAL at 1733966387178Running coprocessor post-close hooks at 1733966387183 (+5 ms)Closed at 1733966387183 2024-12-12T01:19:47,184 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:19:47,185 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:19:47,185 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309. 2024-12-12T01:19:47,185 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for 6bf52e50c78a758898cb8b5b8d262309: Waiting for close lock at 1733966387179Running coprocessor pre-close hooks at 1733966387179Disabling compacts and flushes for region at 1733966387179Disabling writes for close at 1733966387179Writing region close event to WAL at 1733966387180 (+1 ms)Running coprocessor post-close hooks at 1733966387184 (+4 ms)Closed at 1733966387185 (+1 ms) 2024-12-12T01:19:47,185 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed 27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:47,186 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=27b918ec566d8a122c845b476f66eb50, regionState=CLOSED 2024-12-12T01:19:47,186 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed 6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:47,186 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=6bf52e50c78a758898cb8b5b8d262309, regionState=CLOSED 2024-12-12T01:19:47,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 27b918ec566d8a122c845b476f66eb50, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:19:47,188 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6bf52e50c78a758898cb8b5b8d262309, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:19:47,189 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-12-12T01:19:47,190 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, 
state=SUCCESS, hasLock=false; CloseRegionProcedure 27b918ec566d8a122c845b476f66eb50, server=36236b0d7605,44743,1733966101408 in 163 msec 2024-12-12T01:19:47,190 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=164 2024-12-12T01:19:47,190 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure 6bf52e50c78a758898cb8b5b8d262309, server=36236b0d7605,38295,1733966101221 in 163 msec 2024-12-12T01:19:47,191 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=27b918ec566d8a122c845b476f66eb50, UNASSIGN in 168 msec 2024-12-12T01:19:47,192 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=164, resume processing ppid=163 2024-12-12T01:19:47,192 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=6bf52e50c78a758898cb8b5b8d262309, UNASSIGN in 168 msec 2024-12-12T01:19:47,194 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-12T01:19:47,194 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 171 msec 2024-12-12T01:19:47,195 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966387195"}]},"ts":"1733966387195"} 2024-12-12T01:19:47,196 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-12T01:19:47,196 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-12T01:19:47,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 181 msec 2024-12-12T01:19:47,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-12T01:19:47,335 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-12T01:19:47,335 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,337 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,337 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,338 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,340 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,341 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:47,341 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:47,343 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/recovered.edits] 2024-12-12T01:19:47,343 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/recovered.edits] 2024-12-12T01:19:47,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,344 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-12T01:19:47,344 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-12T01:19:47,344 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-12T01:19:47,344 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-12T01:19:47,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:47,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:47,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:47,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:47,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-12T01:19:47,348 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/cf/b7de830b3d30400f9bbf5f81ccbe0956 to 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/cf/b7de830b3d30400f9bbf5f81ccbe0956 2024-12-12T01:19:47,348 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/cf/dde3dbdfa35a4d7e841e651729a5c685 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/cf/dde3dbdfa35a4d7e841e651729a5c685 2024-12-12T01:19:47,351 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50/recovered.edits/9.seqid 2024-12-12T01:19:47,351 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309/recovered.edits/9.seqid 2024-12-12T01:19:47,351 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/27b918ec566d8a122c845b476f66eb50 2024-12-12T01:19:47,351 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithMergeRegion/6bf52e50c78a758898cb8b5b8d262309 2024-12-12T01:19:47,351 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-12T01:19:47,353 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,355 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-12T01:19:47,357 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-12T01:19:47,358 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,358 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
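[Editor's sketch, not part of the captured log] The teardown continues below by deleting the table state from hbase:meta and then removing the test snapshots (emptySnaptb0-..., snaptb0-..., snaptb0-...-1) through SnapshotManager. A hedged sketch of the corresponding client-side snapshot cleanup is shown here; the snapshot names are taken from the log entries below, and the helper name and loop are illustrative assumptions.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;

public final class SnapshotCleanupSketch {
  // Hypothetical helper mirroring the 'delete name: "..." type: DISABLED' entries below.
  static void deleteTestSnapshots(Admin admin) throws IOException {
    String[] snapshots = {
      "emptySnaptb0-testExportFileSystemStateWithMergeRegion",
      "snaptb0-testExportFileSystemStateWithMergeRegion",
      "snaptb0-testExportFileSystemStateWithMergeRegion-1"
    };
    for (String name : snapshots) {
      // Each call shows up in the log as master.MasterRpcServices(838) followed by
      // snapshot.SnapshotManager(381) "Deleting snapshot: ...".
      admin.deleteSnapshot(name);
    }
  }
}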
2024-12-12T01:19:47,358 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966387358"}]},"ts":"9223372036854775807"} 2024-12-12T01:19:47,358 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966387358"}]},"ts":"9223372036854775807"} 2024-12-12T01:19:47,360 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-12T01:19:47,360 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 6bf52e50c78a758898cb8b5b8d262309, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733966364256.6bf52e50c78a758898cb8b5b8d262309.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 27b918ec566d8a122c845b476f66eb50, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733966364256.27b918ec566d8a122c845b476f66eb50.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T01:19:47,360 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-12-12T01:19:47,360 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966387360"}]},"ts":"9223372036854775807"} 2024-12-12T01:19:47,362 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-12T01:19:47,362 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,363 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 27 msec 2024-12-12T01:19:47,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-12T01:19:47,455 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,455 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-12T01:19:47,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-12T01:19:47,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,465 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-12T01:19:47,466 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:47,467 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-12T01:19:47,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:47,490 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=810 (was 798) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:43721 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43721 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_530705222_1 at /127.0.0.1:58362 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 5168) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_530705222_1 at /127.0.0.1:44884 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:41910 [Waiting for operation #3] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5862 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:44918 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:58378 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=793 (was 778) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=716 (was 828), ProcessCount=14 (was 17), AvailableMemoryMB=5993 (was 5779) - AvailableMemoryMB LEAK? - 2024-12-12T01:19:47,491 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-12-12T01:19:47,510 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=810, OpenFileDescriptor=793, MaxFileDescriptor=1048576, SystemLoadAverage=716, ProcessCount=14, AvailableMemoryMB=5992 2024-12-12T01:19:47,510 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-12-12T01:19:47,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:19:47,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T01:19:47,513 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:19:47,513 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:47,513 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 
procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-12-12T01:19:47,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-12T01:19:47,514 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:19:47,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742209_1385 (size=407) 2024-12-12T01:19:47,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742209_1385 (size=407) 2024-12-12T01:19:47,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742209_1385 (size=407) 2024-12-12T01:19:47,523 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b350edc44c3ca5c3a550d7a8c4d546a2, NAME => 'testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:19:47,523 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e8af4be5fd37254877e2fab7e9514d84, NAME => 'testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:19:47,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742211_1387 (size=68) 2024-12-12T01:19:47,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742210_1386 (size=68) 2024-12-12T01:19:47,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742210_1386 (size=68) 2024-12-12T01:19:47,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742211_1387 (size=68) 2024-12-12T01:19:47,533 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742211_1387 (size=68) 2024-12-12T01:19:47,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742210_1386 (size=68) 2024-12-12T01:19:47,533 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:47,533 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:47,533 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing b350edc44c3ca5c3a550d7a8c4d546a2, disabling compactions & flushes 2024-12-12T01:19:47,533 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing e8af4be5fd37254877e2fab7e9514d84, disabling compactions & flushes 2024-12-12T01:19:47,533 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 2024-12-12T01:19:47,533 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 2024-12-12T01:19:47,533 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 2024-12-12T01:19:47,533 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. after waiting 0 ms 2024-12-12T01:19:47,533 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 2024-12-12T01:19:47,533 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 2024-12-12T01:19:47,533 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 2024-12-12T01:19:47,533 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. after waiting 0 ms 2024-12-12T01:19:47,533 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 
2024-12-12T01:19:47,533 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for b350edc44c3ca5c3a550d7a8c4d546a2: Waiting for close lock at 1733966387533Disabling compacts and flushes for region at 1733966387533Disabling writes for close at 1733966387533Writing region close event to WAL at 1733966387533Closed at 1733966387533 2024-12-12T01:19:47,533 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 2024-12-12T01:19:47,533 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for e8af4be5fd37254877e2fab7e9514d84: Waiting for close lock at 1733966387533Disabling compacts and flushes for region at 1733966387533Disabling writes for close at 1733966387533Writing region close event to WAL at 1733966387533Closed at 1733966387533 2024-12-12T01:19:47,534 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:19:47,535 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733966387534"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966387534"}]},"ts":"1733966387534"} 2024-12-12T01:19:47,535 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733966387534"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966387534"}]},"ts":"1733966387534"} 2024-12-12T01:19:47,537 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
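The entries above show CreateTableProcedure pid=169 writing the filesystem layout for 'testtb-testExportExpiredSnapshot' and adding its two regions to hbase:meta. For reference, a minimal client-side sketch of a create request that yields this table shape via the standard HBase Admin API; the connection setup is an assumption (the test harness wires up its own mini-cluster configuration), only the table name, family 'cf', VERSIONS=1 and the single split key come from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      // Single column family 'cf' keeping one version, matching the descriptor in the log.
      admin.createTable(
          TableDescriptorBuilder.newBuilder(table)
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build())
              .build(),
          // One split key gives the two regions created above: ('' -> '1') and ('1' -> '').
          new byte[][] { Bytes.toBytes("1") });
    }
  }
}

The single split key '1' is what produces the two region boundaries (STARTKEY='' / ENDKEY='1' and STARTKEY='1' / ENDKEY='') reported by RegionOpenAndInit.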
2024-12-12T01:19:47,538 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:19:47,538 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966387538"}]},"ts":"1733966387538"} 2024-12-12T01:19:47,540 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-12T01:19:47,540 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:19:47,541 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:19:47,541 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:19:47,541 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:19:47,541 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:19:47,541 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:19:47,541 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:19:47,541 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:19:47,541 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:19:47,541 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:19:47,541 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:19:47,541 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b350edc44c3ca5c3a550d7a8c4d546a2, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e8af4be5fd37254877e2fab7e9514d84, ASSIGN}] 2024-12-12T01:19:47,542 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e8af4be5fd37254877e2fab7e9514d84, ASSIGN 2024-12-12T01:19:47,542 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b350edc44c3ca5c3a550d7a8c4d546a2, ASSIGN 2024-12-12T01:19:47,543 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e8af4be5fd37254877e2fab7e9514d84, ASSIGN; state=OFFLINE, location=36236b0d7605,33883,1733966101479; forceNewPlan=false, retain=false 2024-12-12T01:19:47,543 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b350edc44c3ca5c3a550d7a8c4d546a2, ASSIGN; state=OFFLINE, location=36236b0d7605,44743,1733966101408; forceNewPlan=false, retain=false 2024-12-12T01:19:47,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-12T01:19:47,693 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T01:19:47,694 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=b350edc44c3ca5c3a550d7a8c4d546a2, regionState=OPENING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:19:47,694 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=e8af4be5fd37254877e2fab7e9514d84, regionState=OPENING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:19:47,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e8af4be5fd37254877e2fab7e9514d84, ASSIGN because future has completed 2024-12-12T01:19:47,696 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure e8af4be5fd37254877e2fab7e9514d84, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:19:47,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b350edc44c3ca5c3a550d7a8c4d546a2, ASSIGN because future has completed 2024-12-12T01:19:47,697 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:19:47,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-12T01:19:47,851 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 2024-12-12T01:19:47,851 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => b350edc44c3ca5c3a550d7a8c4d546a2, NAME => 'testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T01:19:47,851 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 
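Procedures pid=170–173 above assign and open the two new regions on 36236b0d7605,44743 and 36236b0d7605,33883. From the client side the resulting placement can be inspected with a RegionLocator; a small sketch, assuming a Connection created as in the earlier snippet:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class ShowRegionPlacement {
  // Prints "encoded region name -> hosting region server" for every region of the table.
  static void printPlacement(Connection conn, TableName table) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}

Run against this test cluster it would list b350edc44c3ca5c3a550d7a8c4d546a2 and e8af4be5fd37254877e2fab7e9514d84 with the region servers chosen by the balancer above.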
2024-12-12T01:19:47,851 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => e8af4be5fd37254877e2fab7e9514d84, NAME => 'testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T01:19:47,851 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. service=AccessControlService 2024-12-12T01:19:47,851 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. service=AccessControlService 2024-12-12T01:19:47,852 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:19:47,852 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:19:47,852 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:47,852 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:47,852 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:47,852 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:47,852 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:47,852 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:47,852 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:47,852 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:47,853 INFO 
[StoreOpener-e8af4be5fd37254877e2fab7e9514d84-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:47,853 INFO [StoreOpener-b350edc44c3ca5c3a550d7a8c4d546a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:47,855 INFO [StoreOpener-e8af4be5fd37254877e2fab7e9514d84-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8af4be5fd37254877e2fab7e9514d84 columnFamilyName cf 2024-12-12T01:19:47,855 INFO [StoreOpener-b350edc44c3ca5c3a550d7a8c4d546a2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b350edc44c3ca5c3a550d7a8c4d546a2 columnFamilyName cf 2024-12-12T01:19:47,855 DEBUG [StoreOpener-e8af4be5fd37254877e2fab7e9514d84-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:47,855 DEBUG [StoreOpener-b350edc44c3ca5c3a550d7a8c4d546a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:47,855 INFO [StoreOpener-e8af4be5fd37254877e2fab7e9514d84-1 {}] regionserver.HStore(327): Store=e8af4be5fd37254877e2fab7e9514d84/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:19:47,855 INFO [StoreOpener-b350edc44c3ca5c3a550d7a8c4d546a2-1 {}] regionserver.HStore(327): Store=b350edc44c3ca5c3a550d7a8c4d546a2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:19:47,855 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for b350edc44c3ca5c3a550d7a8c4d546a2 
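The CompactionConfiguration lines above report the effective compaction tuning for family 'cf' (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 7 days, jitter 0.5). A sketch of the standard configuration keys behind those values, set here to the same defaults the log reports; the key names are the usual HBase properties, so treat this as illustrative rather than a transcript of the test's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionTuningDefaults {
  static Configuration withLoggedDefaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                  // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                 // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);           // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);   // off-peak ratio 5.000000
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);      // major period: 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);    // major jitter 0.500000
    return conf;
  }
}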
2024-12-12T01:19:47,855 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:47,856 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:47,856 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:47,856 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:47,856 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:47,857 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:47,857 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:47,857 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:47,857 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:47,858 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:47,858 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:47,860 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:19:47,860 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:19:47,860 INFO 
[RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened b350edc44c3ca5c3a550d7a8c4d546a2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69419716, jitterRate=0.034434378147125244}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:19:47,860 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened e8af4be5fd37254877e2fab7e9514d84; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58952163, jitterRate=-0.12154431641101837}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:19:47,860 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:47,860 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:47,861 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for e8af4be5fd37254877e2fab7e9514d84: Running coprocessor pre-open hook at 1733966387852Writing region info on filesystem at 1733966387852Initializing all the Stores at 1733966387853 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966387853Cleaning up temporary data from old regions at 1733966387857 (+4 ms)Running coprocessor post-open hooks at 1733966387860 (+3 ms)Region opened successfully at 1733966387861 (+1 ms) 2024-12-12T01:19:47,861 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for b350edc44c3ca5c3a550d7a8c4d546a2: Running coprocessor pre-open hook at 1733966387852Writing region info on filesystem at 1733966387852Initializing all the Stores at 1733966387853 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966387853Cleaning up temporary data from old regions at 1733966387857 (+4 ms)Running coprocessor post-open hooks at 1733966387860 (+3 ms)Region opened successfully at 1733966387861 (+1 ms) 2024-12-12T01:19:47,862 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2., pid=173, masterSystemTime=1733966387848 2024-12-12T01:19:47,862 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84., pid=172, masterSystemTime=1733966387848 2024-12-12T01:19:47,864 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 2024-12-12T01:19:47,864 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 2024-12-12T01:19:47,865 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=e8af4be5fd37254877e2fab7e9514d84, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:19:47,865 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 2024-12-12T01:19:47,865 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 2024-12-12T01:19:47,865 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=b350edc44c3ca5c3a550d7a8c4d546a2, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:19:47,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure e8af4be5fd37254877e2fab7e9514d84, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:19:47,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:19:47,869 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-12-12T01:19:47,869 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure e8af4be5fd37254877e2fab7e9514d84, server=36236b0d7605,33883,1733966101479 in 171 msec 2024-12-12T01:19:47,871 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-12-12T01:19:47,871 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e8af4be5fd37254877e2fab7e9514d84, ASSIGN in 328 msec 2024-12-12T01:19:47,871 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2, server=36236b0d7605,44743,1733966101408 in 172 msec 2024-12-12T01:19:47,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-12-12T01:19:47,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b350edc44c3ca5c3a550d7a8c4d546a2, ASSIGN in 330 msec 2024-12-12T01:19:47,873 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:19:47,873 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966387873"}]},"ts":"1733966387873"} 2024-12-12T01:19:47,875 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-12T01:19:47,876 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:19:47,876 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-12T01:19:47,879 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-12T01:19:47,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:47,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:47,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:47,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:47,883 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:47,883 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:47,883 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:47,883 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 
testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:47,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 371 msec 2024-12-12T01:19:48,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-12T01:19:48,145 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-12T01:19:48,145 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-12T01:19:48,145 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:19:48,148 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-12T01:19:48,149 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:19:48,149 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-12T01:19:48,149 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-12T01:19:48,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-12T01:19:48,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966388152 (current time:1733966388152). 
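The master has just received the snapshot request { ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }. A minimal sketch of how a client issues such a FLUSH-type snapshot through the Admin API, with the Connection setup assumed as before:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class TakeFlushSnapshot {
  // Requests a FLUSH-type snapshot; the master runs it as the SnapshotProcedure seen below (pid=174).
  static void snapshot(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH);
    }
  }
}

The completed snapshot is what the ExportSnapshot tool later copies out, which is the operation this test class exercises.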
2024-12-12T01:19:48,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:19:48,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-12T01:19:48,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:19:48,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ee92e93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:48,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:19:48,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:19:48,154 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:19:48,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:19:48,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:19:48,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4acbc374, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:48,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:19:48,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:19:48,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:48,156 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41486, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:19:48,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d0d42c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:48,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:19:48,157 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:19:48,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:19:48,158 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39052, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:19:48,159 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:19:48,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:19:48,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:48,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:48,159 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:19:48,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e979dc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:48,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:19:48,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:19:48,161 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:19:48,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:19:48,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:19:48,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@aa79648, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:48,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:19:48,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:19:48,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:48,162 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41492, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:19:48,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fab0af4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:48,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:19:48,164 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:19:48,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:19:48,165 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39066, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
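The handler entries above trace a fresh client connection bootstrapping itself: fetching the cluster id ('c7aab131-a6d9-4901-b39f-42031992260b') from the connection registry and then locating hbase:meta on 36236b0d7605,44743. The log itself goes through the master-based connection registry; as an alternative illustration, a plain standalone client can point at the same ZooKeeper quorum the ZKWatcher lines report (127.0.0.1:54997). Only those two property values are taken from the log; everything else in this sketch is an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public final class ClientBootstrap {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum and client port as reported by the ZKWatcher lines (quorum=127.0.0.1:54997).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "54997");
    // Creating the connection performs the cluster-id fetch and hbase:meta lookup traced above.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      System.out.println("Connected; table descriptor: " + table.getDescriptor());
    }
  }
}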
2024-12-12T01:19:48,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:19:48,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:19:48,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:19:48,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:48,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:48,168 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
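The call stack above shows snapshot validation reading the table's ACL (PermissionStorage.getTablePermissions) so that the 'jenkins: RWXCA' grant recorded earlier can be carried along in the snapshot description. For reference, a sketch of how such a table-level grant is made with the public AccessControlClient API; the user name and RWXCA actions mirror the log entry, while the authenticated Connection is assumed:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public final class GrantTableAcl {
  // Grants READ, WRITE, EXEC, CREATE, ADMIN (RWXCA) on the whole table to user 'jenkins'.
  static void grant(Connection conn) throws Throwable {
    AccessControlClient.grant(conn,
        TableName.valueOf("testtb-testExportExpiredSnapshot"),
        "jenkins",
        null,   // null family    => whole table
        null,   // null qualifier => all columns
        Permission.Action.READ,
        Permission.Action.WRITE,
        Permission.Action.EXEC,
        Permission.Action.CREATE,
        Permission.Action.ADMIN);
  }
}

The resulting ACL entry is what the RpcServer handler reads back just below ("Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA]").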
2024-12-12T01:19:48,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-12T01:19:48,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:19:48,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-12T01:19:48,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-12T01:19:48,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-12T01:19:48,171 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:19:48,172 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:19:48,174 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:19:48,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742212_1388 (size=170) 2024-12-12T01:19:48,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742212_1388 (size=170) 2024-12-12T01:19:48,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742212_1388 (size=170) 2024-12-12T01:19:48,181 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:19:48,182 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8af4be5fd37254877e2fab7e9514d84}] 2024-12-12T01:19:48,182 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:48,182 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:48,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-12T01:19:48,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-12-12T01:19:48,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-12-12T01:19:48,334 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 2024-12-12T01:19:48,334 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 2024-12-12T01:19:48,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for b350edc44c3ca5c3a550d7a8c4d546a2: 2024-12-12T01:19:48,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for e8af4be5fd37254877e2fab7e9514d84: 2024-12-12T01:19:48,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-12T01:19:48,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-12T01:19:48,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-12T01:19:48,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-12T01:19:48,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:19:48,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:19:48,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:19:48,335 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:19:48,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742213_1389 (size=71) 2024-12-12T01:19:48,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742214_1390 (size=71) 2024-12-12T01:19:48,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742213_1389 (size=71) 2024-12-12T01:19:48,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742213_1389 (size=71) 2024-12-12T01:19:48,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742214_1390 (size=71) 2024-12-12T01:19:48,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742214_1390 (size=71) 2024-12-12T01:19:48,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 2024-12-12T01:19:48,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-12-12T01:19:48,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-12-12T01:19:48,345 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:48,346 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:48,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 
2024-12-12T01:19:48,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-12T01:19:48,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-12-12T01:19:48,347 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:48,347 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:48,348 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2 in 164 msec 2024-12-12T01:19:48,349 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=176, resume processing ppid=174 2024-12-12T01:19:48,349 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e8af4be5fd37254877e2fab7e9514d84 in 166 msec 2024-12-12T01:19:48,349 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:19:48,350 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:19:48,350 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:19:48,350 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-12T01:19:48,351 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-12T01:19:48,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742215_1391 (size=552) 2024-12-12T01:19:48,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742215_1391 (size=552) 2024-12-12T01:19:48,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742215_1391 (size=552) 2024-12-12T01:19:48,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure 
is done pid=174 2024-12-12T01:19:48,767 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:19:48,771 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:19:48,771 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-12T01:19:48,773 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:19:48,773 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-12T01:19:48,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 604 msec 2024-12-12T01:19:48,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-12T01:19:48,795 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-12T01:19:48,799 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='09be94a43999abf8e471579c371e9cc77', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:19:48,800 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='121f1bc1315a24c3438a4970e46593391', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:19:48,801 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='2c69d7f68b536a4cc5cfd5acae8a615c3', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84., 
hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:19:48,802 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='36ca2d3f8927c5237a838bf8492fdd11a', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:19:48,802 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='467b0efbd40ce43c5f2ef7087038cf2ef', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:19:48,803 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='565e74e8bde004ed47052371222847747', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:19:48,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44743 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:19:48,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33883 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:19:48,809 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-12T01:19:48,812 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-12T01:19:48,812 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 
2024-12-12T01:19:48,812 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:19:48,814 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-12T01:19:48,819 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-12T01:19:48,823 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-12T01:19:48,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-12T01:19:48,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966388825 (current time:1733966388825). 2024-12-12T01:19:48,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:19:48,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-12T01:19:48,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:19:48,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38337713, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:48,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:19:48,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:19:48,827 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:19:48,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:19:48,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:19:48,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58a4be76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-12T01:19:48,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:19:48,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:19:48,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:48,829 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41504, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:19:48,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62c48dcd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:48,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:19:48,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:19:48,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:19:48,831 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39080, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:19:48,832 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 
2024-12-12T01:19:48,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:19:48,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:48,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:48,832 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:19:48,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@386ae348, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:48,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:19:48,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:19:48,834 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:19:48,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:19:48,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:19:48,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59994e6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:48,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:19:48,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:19:48,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:48,835 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41532, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:19:48,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b6eced0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:48,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:19:48,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:19:48,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:19:48,837 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39082, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:19:48,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:19:48,840 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 
2024-12-12T01:19:48,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:19:48,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:48,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:48,840 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:19:48,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-12T01:19:48,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-12T01:19:48,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-12T01:19:48,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-12T01:19:48,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-12T01:19:48,842 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:19:48,843 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:19:48,845 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:19:48,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742216_1392 (size=165) 2024-12-12T01:19:48,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742216_1392 (size=165) 2024-12-12T01:19:48,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742216_1392 (size=165) 2024-12-12T01:19:48,852 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:19:48,852 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8af4be5fd37254877e2fab7e9514d84}] 2024-12-12T01:19:48,853 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:48,853 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:48,944 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-12T01:19:49,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-12T01:19:49,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44743 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-12T01:19:49,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 2024-12-12T01:19:49,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 2024-12-12T01:19:49,005 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing b350edc44c3ca5c3a550d7a8c4d546a2 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-12T01:19:49,005 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing e8af4be5fd37254877e2fab7e9514d84 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-12T01:19:49,022 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/.tmp/cf/d8d4ed82281442768515efde67fff68b is 71, key is 111a953e542ebf32c3948ca457f71d86/cf:q/1733966388808/Put/seqid=0 2024-12-12T01:19:49,023 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/.tmp/cf/5a18e617269142f1b9be9502922a53f8 is 71, key is 012377f31441f0639e628baf8a1a8b8b/cf:q/1733966388805/Put/seqid=0 2024-12-12T01:19:49,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742217_1393 (size=8256) 2024-12-12T01:19:49,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742217_1393 (size=8256) 2024-12-12T01:19:49,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742217_1393 (size=8256) 2024-12-12T01:19:49,035 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/.tmp/cf/d8d4ed82281442768515efde67fff68b 2024-12-12T01:19:49,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742218_1394 (size=5354) 2024-12-12T01:19:49,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742218_1394 (size=5354) 2024-12-12T01:19:49,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742218_1394 (size=5354) 2024-12-12T01:19:49,039 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/.tmp/cf/5a18e617269142f1b9be9502922a53f8 2024-12-12T01:19:49,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/.tmp/cf/d8d4ed82281442768515efde67fff68b as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/cf/d8d4ed82281442768515efde67fff68b 2024-12-12T01:19:49,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/.tmp/cf/5a18e617269142f1b9be9502922a53f8 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/cf/5a18e617269142f1b9be9502922a53f8 2024-12-12T01:19:49,046 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/cf/d8d4ed82281442768515efde67fff68b, entries=46, sequenceid=6, filesize=8.1 K 2024-12-12T01:19:49,047 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for e8af4be5fd37254877e2fab7e9514d84 in 42ms, sequenceid=6, compaction requested=false 2024-12-12T01:19:49,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-12T01:19:49,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for e8af4be5fd37254877e2fab7e9514d84: 
2024-12-12T01:19:49,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. for snaptb0-testExportExpiredSnapshot completed. 2024-12-12T01:19:49,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-12T01:19:49,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:19:49,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/cf/d8d4ed82281442768515efde67fff68b] hfiles 2024-12-12T01:19:49,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/cf/d8d4ed82281442768515efde67fff68b for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-12T01:19:49,049 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/cf/5a18e617269142f1b9be9502922a53f8, entries=4, sequenceid=6, filesize=5.2 K 2024-12-12T01:19:49,050 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for b350edc44c3ca5c3a550d7a8c4d546a2 in 45ms, sequenceid=6, compaction requested=false 2024-12-12T01:19:49,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for b350edc44c3ca5c3a550d7a8c4d546a2: 2024-12-12T01:19:49,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. for snaptb0-testExportExpiredSnapshot completed. 2024-12-12T01:19:49,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-12T01:19:49,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:19:49,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/cf/5a18e617269142f1b9be9502922a53f8] hfiles 2024-12-12T01:19:49,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/cf/5a18e617269142f1b9be9502922a53f8 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-12T01:19:49,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742219_1395 (size=110) 2024-12-12T01:19:49,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742219_1395 (size=110) 2024-12-12T01:19:49,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742219_1395 (size=110) 2024-12-12T01:19:49,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 
2024-12-12T01:19:49,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-12T01:19:49,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-12T01:19:49,063 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:49,063 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:19:49,066 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e8af4be5fd37254877e2fab7e9514d84 in 212 msec 2024-12-12T01:19:49,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742220_1396 (size=110) 2024-12-12T01:19:49,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742220_1396 (size=110) 2024-12-12T01:19:49,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742220_1396 (size=110) 2024-12-12T01:19:49,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 
2024-12-12T01:19:49,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-12T01:19:49,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-12T01:19:49,068 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:49,068 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:19:49,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=177 2024-12-12T01:19:49,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2 in 217 msec 2024-12-12T01:19:49,071 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:19:49,071 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:19:49,072 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:19:49,072 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-12T01:19:49,073 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-12T01:19:49,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742221_1397 (size=630) 2024-12-12T01:19:49,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742221_1397 (size=630) 2024-12-12T01:19:49,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742221_1397 (size=630) 2024-12-12T01:19:49,083 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:19:49,087 INFO 
[PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:19:49,087 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-12T01:19:49,088 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:19:49,089 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-12T01:19:49,090 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 248 msec 2024-12-12T01:19:49,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-12T01:19:49,155 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-12T01:19:49,156 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:19:49,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-12T01:19:49,158 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:19:49,158 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:49,158 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-12-12T01:19:49,159 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-12T01:19:49,159 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:19:49,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742222_1398 (size=400) 2024-12-12T01:19:49,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742222_1398 (size=400) 2024-12-12T01:19:49,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742222_1398 (size=400) 2024-12-12T01:19:49,172 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e4e1b654a5a995b6e71de7ab606443fe, NAME => 'testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:19:49,172 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => d644bbcb843c6264137c6748d170fd67, NAME => 'testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:19:49,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742224_1400 (size=61) 2024-12-12T01:19:49,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742224_1400 (size=61) 2024-12-12T01:19:49,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742224_1400 (size=61) 2024-12-12T01:19:49,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742223_1399 (size=61) 2024-12-12T01:19:49,183 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:49,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742223_1399 (size=61) 2024-12-12T01:19:49,183 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing d644bbcb843c6264137c6748d170fd67, disabling compactions & flushes 2024-12-12T01:19:49,183 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 2024-12-12T01:19:49,183 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 2024-12-12T01:19:49,183 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. after waiting 0 ms 2024-12-12T01:19:49,183 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 2024-12-12T01:19:49,183 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 2024-12-12T01:19:49,183 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for d644bbcb843c6264137c6748d170fd67: Waiting for close lock at 1733966389183Disabling compacts and flushes for region at 1733966389183Disabling writes for close at 1733966389183Writing region close event to WAL at 1733966389183Closed at 1733966389183 2024-12-12T01:19:49,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742223_1399 (size=61) 2024-12-12T01:19:49,184 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:49,184 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing e4e1b654a5a995b6e71de7ab606443fe, disabling compactions & flushes 2024-12-12T01:19:49,184 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:19:49,184 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:19:49,184 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. after waiting 0 ms 2024-12-12T01:19:49,184 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 
2024-12-12T01:19:49,184 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:19:49,184 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for e4e1b654a5a995b6e71de7ab606443fe: Waiting for close lock at 1733966389184Disabling compacts and flushes for region at 1733966389184Disabling writes for close at 1733966389184Writing region close event to WAL at 1733966389184Closed at 1733966389184 2024-12-12T01:19:49,185 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:19:49,185 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733966389185"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966389185"}]},"ts":"1733966389185"} 2024-12-12T01:19:49,186 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733966389185"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966389185"}]},"ts":"1733966389185"} 2024-12-12T01:19:49,188 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-12T01:19:49,188 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:19:49,188 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966389188"}]},"ts":"1733966389188"} 2024-12-12T01:19:49,190 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-12T01:19:49,190 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:19:49,191 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:19:49,191 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:19:49,191 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:19:49,191 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:19:49,191 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:19:49,191 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:19:49,191 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:19:49,191 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:19:49,191 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:19:49,191 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:19:49,192 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=e4e1b654a5a995b6e71de7ab606443fe, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d644bbcb843c6264137c6748d170fd67, ASSIGN}] 2024-12-12T01:19:49,193 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=e4e1b654a5a995b6e71de7ab606443fe, ASSIGN 2024-12-12T01:19:49,193 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d644bbcb843c6264137c6748d170fd67, ASSIGN 2024-12-12T01:19:49,193 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=e4e1b654a5a995b6e71de7ab606443fe, ASSIGN; state=OFFLINE, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:19:49,193 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d644bbcb843c6264137c6748d170fd67, ASSIGN; state=OFFLINE, location=36236b0d7605,33883,1733966101479; forceNewPlan=false, retain=false 2024-12-12T01:19:49,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-12T01:19:49,344 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-12T01:19:49,344 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=d644bbcb843c6264137c6748d170fd67, regionState=OPENING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:19:49,344 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=e4e1b654a5a995b6e71de7ab606443fe, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:19:49,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=e4e1b654a5a995b6e71de7ab606443fe, ASSIGN because future has completed 2024-12-12T01:19:49,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4e1b654a5a995b6e71de7ab606443fe, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:19:49,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d644bbcb843c6264137c6748d170fd67, ASSIGN because future has completed 2024-12-12T01:19:49,347 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure d644bbcb843c6264137c6748d170fd67, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:19:49,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-12T01:19:49,502 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:19:49,502 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => e4e1b654a5a995b6e71de7ab606443fe, NAME => 'testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T01:19:49,502 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 2024-12-12T01:19:49,502 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => d644bbcb843c6264137c6748d170fd67, NAME => 'testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T01:19:49,502 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. service=AccessControlService 2024-12-12T01:19:49,502 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 
service=AccessControlService 2024-12-12T01:19:49,502 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:19:49,502 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:19:49,502 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:49,502 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:49,502 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:49,503 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:19:49,503 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:49,503 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:49,503 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:49,503 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:49,504 INFO [StoreOpener-d644bbcb843c6264137c6748d170fd67-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:49,504 INFO [StoreOpener-e4e1b654a5a995b6e71de7ab606443fe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:49,505 INFO [StoreOpener-e4e1b654a5a995b6e71de7ab606443fe-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4e1b654a5a995b6e71de7ab606443fe columnFamilyName cf 2024-12-12T01:19:49,505 INFO [StoreOpener-d644bbcb843c6264137c6748d170fd67-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d644bbcb843c6264137c6748d170fd67 columnFamilyName cf 2024-12-12T01:19:49,505 DEBUG [StoreOpener-e4e1b654a5a995b6e71de7ab606443fe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:49,505 DEBUG [StoreOpener-d644bbcb843c6264137c6748d170fd67-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:19:49,506 INFO [StoreOpener-d644bbcb843c6264137c6748d170fd67-1 {}] regionserver.HStore(327): Store=d644bbcb843c6264137c6748d170fd67/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:19:49,506 INFO [StoreOpener-e4e1b654a5a995b6e71de7ab606443fe-1 {}] regionserver.HStore(327): Store=e4e1b654a5a995b6e71de7ab606443fe/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:19:49,506 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:49,506 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:49,507 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:49,507 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:49,507 DEBUG 
[RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:49,507 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:49,508 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:49,508 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:49,508 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:49,508 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:49,509 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:49,509 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:49,511 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/d644bbcb843c6264137c6748d170fd67/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:19:49,511 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/e4e1b654a5a995b6e71de7ab606443fe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:19:49,511 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened d644bbcb843c6264137c6748d170fd67; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63254963, jitterRate=-0.057427600026130676}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:19:49,511 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:49,511 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened e4e1b654a5a995b6e71de7ab606443fe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59868619, jitterRate=-0.10788805782794952}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:19:49,511 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:49,512 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for e4e1b654a5a995b6e71de7ab606443fe: Running coprocessor pre-open hook at 1733966389503Writing region info on filesystem at 1733966389503Initializing all the Stores at 1733966389503Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966389504 (+1 ms)Cleaning up temporary data from old regions at 1733966389508 (+4 ms)Running coprocessor post-open hooks at 1733966389511 (+3 ms)Region opened successfully at 1733966389512 (+1 ms) 2024-12-12T01:19:49,512 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for d644bbcb843c6264137c6748d170fd67: Running coprocessor pre-open hook at 1733966389503Writing region info on filesystem at 1733966389503Initializing all the Stores at 1733966389503Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966389503Cleaning up temporary data from old regions at 1733966389508 (+5 ms)Running coprocessor post-open hooks at 1733966389511 (+3 ms)Region opened successfully at 1733966389512 (+1 ms) 2024-12-12T01:19:49,513 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe., pid=183, masterSystemTime=1733966389498 2024-12-12T01:19:49,513 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67., pid=184, masterSystemTime=1733966389499 2024-12-12T01:19:49,514 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 2024-12-12T01:19:49,514 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 
2024-12-12T01:19:49,515 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=d644bbcb843c6264137c6748d170fd67, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:19:49,515 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:19:49,515 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:19:49,516 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=e4e1b654a5a995b6e71de7ab606443fe, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:19:49,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure d644bbcb843c6264137c6748d170fd67, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:19:49,519 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=36236b0d7605,38295,1733966101221, table=testExportExpiredSnapshot, region=e4e1b654a5a995b6e71de7ab606443fe. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-12T01:19:49,519 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=36236b0d7605,33883,1733966101479, table=testExportExpiredSnapshot, region=d644bbcb843c6264137c6748d170fd67. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-12T01:19:49,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4e1b654a5a995b6e71de7ab606443fe, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:19:49,521 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=182 2024-12-12T01:19:49,521 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure d644bbcb843c6264137c6748d170fd67, server=36236b0d7605,33883,1733966101479 in 172 msec 2024-12-12T01:19:49,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=181 2024-12-12T01:19:49,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure e4e1b654a5a995b6e71de7ab606443fe, server=36236b0d7605,38295,1733966101221 in 173 msec 2024-12-12T01:19:49,523 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d644bbcb843c6264137c6748d170fd67, ASSIGN in 329 msec 2024-12-12T01:19:49,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-12-12T01:19:49,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=e4e1b654a5a995b6e71de7ab606443fe, ASSIGN in 330 msec 2024-12-12T01:19:49,524 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:19:49,525 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966389524"}]},"ts":"1733966389524"} 2024-12-12T01:19:49,526 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-12T01:19:49,527 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:19:49,527 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-12T01:19:49,530 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-12T01:19:49,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:49,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:49,608 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:49,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:19:49,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-12T01:19:50,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:50,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:50,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:50,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:50,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:50,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:50,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:50,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:19:50,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 991 msec 2024-12-12T01:19:50,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-12T01:19:50,295 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-12-12T01:19:50,295 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-12T01:19:50,295 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:19:50,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-12T01:19:50,299 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:19:50,299 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 2024-12-12T01:19:50,299 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-12T01:19:50,304 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='09a5a422cb2260d518a00e0f1528ac548', locateType=CURRENT is [region=testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:19:50,305 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='1b397080a08c453de745e1aee1c5d551e', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:19:50,306 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='2af4948308198e72f4e35d1bc3d898488', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:19:50,307 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='35e542ff652d102190bde596688a64491', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:19:50,307 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='4caf57f82061bbd4d2daed13ce3b2c8d3', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:19:50,311 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:19:50,313 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33883 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. with WAL disabled. Data may be lost in the event of a crash. 
2024-12-12T01:19:50,314 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-12T01:19:50,317 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-12T01:19:50,317 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:19:50,317 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:19:50,319 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-12T01:19:50,323 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-12T01:19:50,329 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-12T01:19:50,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-12T01:19:50,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:19:50,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@278126c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:50,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:19:50,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:19:50,330 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:19:50,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:19:50,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:19:50,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d2e1b5b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:50,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): 
Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:19:50,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:19:50,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:50,332 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41556, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:19:50,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1db4e30a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:50,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:19:50,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:19:50,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:19:50,334 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39086, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:19:50,335 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 
2024-12-12T01:19:50,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:19:50,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:50,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:50,335 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:19:50,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2721c24a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:50,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:19:50,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:19:50,337 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:19:50,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:19:50,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:19:50,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60233e47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:50,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:19:50,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:19:50,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:50,338 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41586, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:19:50,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55325ec7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:19:50,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:19:50,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:19:50,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:19:50,340 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39092, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:19:50,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:19:50,343 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 
2024-12-12T01:19:50,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:19:50,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:50,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:19:50,343 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:19:50,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-12T01:19:50,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-12T01:19:50,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-12T01:19:50,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-12T01:19:50,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-12T01:19:50,345 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:19:50,346 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:19:50,348 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:19:50,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742225_1401 (size=152) 2024-12-12T01:19:50,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742225_1401 (size=152) 2024-12-12T01:19:50,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742225_1401 (size=152) 2024-12-12T01:19:50,355 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:19:50,355 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4e1b654a5a995b6e71de7ab606443fe}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d644bbcb843c6264137c6748d170fd67}] 2024-12-12T01:19:50,356 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:50,356 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:50,455 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-12T01:19:50,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-12T01:19:50,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-12T01:19:50,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:19:50,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 2024-12-12T01:19:50,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing e4e1b654a5a995b6e71de7ab606443fe 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-12T01:19:50,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing d644bbcb843c6264137c6748d170fd67 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-12T01:19:50,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/e4e1b654a5a995b6e71de7ab606443fe/.tmp/cf/a2a2f35bcf084605b9c56355729ed8f8 is 71, key is 0e6a5bd404617a10f13314bcd5eac527/cf:q/1733966390311/Put/seqid=0 2024-12-12T01:19:50,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/d644bbcb843c6264137c6748d170fd67/.tmp/cf/aeff75899cc1413b8acd93f728c50381 is 71, key is 11be4640092117c6c32f6de98aa72fc6/cf:q/1733966390313/Put/seqid=0 2024-12-12T01:19:50,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742227_1403 (size=8324) 2024-12-12T01:19:50,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742226_1402 (size=5288) 2024-12-12T01:19:50,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742226_1402 (size=5288) 2024-12-12T01:19:50,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742227_1403 (size=8324) 2024-12-12T01:19:50,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742226_1402 (size=5288) 2024-12-12T01:19:50,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:34547 is added to blk_1073742227_1403 (size=8324) 2024-12-12T01:19:50,533 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/e4e1b654a5a995b6e71de7ab606443fe/.tmp/cf/a2a2f35bcf084605b9c56355729ed8f8 2024-12-12T01:19:50,533 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/d644bbcb843c6264137c6748d170fd67/.tmp/cf/aeff75899cc1413b8acd93f728c50381 2024-12-12T01:19:50,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/e4e1b654a5a995b6e71de7ab606443fe/.tmp/cf/a2a2f35bcf084605b9c56355729ed8f8 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/e4e1b654a5a995b6e71de7ab606443fe/cf/a2a2f35bcf084605b9c56355729ed8f8 2024-12-12T01:19:50,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/d644bbcb843c6264137c6748d170fd67/.tmp/cf/aeff75899cc1413b8acd93f728c50381 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/d644bbcb843c6264137c6748d170fd67/cf/aeff75899cc1413b8acd93f728c50381 2024-12-12T01:19:50,542 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/e4e1b654a5a995b6e71de7ab606443fe/cf/a2a2f35bcf084605b9c56355729ed8f8, entries=3, sequenceid=5, filesize=5.2 K 2024-12-12T01:19:50,542 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/d644bbcb843c6264137c6748d170fd67/cf/aeff75899cc1413b8acd93f728c50381, entries=47, sequenceid=5, filesize=8.1 K 2024-12-12T01:19:50,543 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for e4e1b654a5a995b6e71de7ab606443fe in 35ms, sequenceid=5, compaction requested=false 2024-12-12T01:19:50,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-12T01:19:50,543 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for d644bbcb843c6264137c6748d170fd67 in 35ms, sequenceid=5, compaction requested=false 2024-12-12T01:19:50,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-12T01:19:50,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for d644bbcb843c6264137c6748d170fd67: 2024-12-12T01:19:50,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for e4e1b654a5a995b6e71de7ab606443fe: 2024-12-12T01:19:50,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. for snapshot-testExportExpiredSnapshot completed. 2024-12-12T01:19:50,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. for snapshot-testExportExpiredSnapshot completed. 2024-12-12T01:19:50,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-12T01:19:50,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-12T01:19:50,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:19:50,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:19:50,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/e4e1b654a5a995b6e71de7ab606443fe/cf/a2a2f35bcf084605b9c56355729ed8f8] hfiles 2024-12-12T01:19:50,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/d644bbcb843c6264137c6748d170fd67/cf/aeff75899cc1413b8acd93f728c50381] hfiles 2024-12-12T01:19:50,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/e4e1b654a5a995b6e71de7ab606443fe/cf/a2a2f35bcf084605b9c56355729ed8f8 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-12T01:19:50,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/d644bbcb843c6264137c6748d170fd67/cf/aeff75899cc1413b8acd93f728c50381 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-12T01:19:50,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742229_1405 (size=103) 2024-12-12T01:19:50,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742229_1405 (size=103) 2024-12-12T01:19:50,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742229_1405 (size=103) 2024-12-12T01:19:50,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 
2024-12-12T01:19:50,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-12T01:19:50,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-12-12T01:19:50,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:50,555 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:19:50,556 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e4e1b654a5a995b6e71de7ab606443fe in 200 msec 2024-12-12T01:19:50,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742228_1404 (size=103) 2024-12-12T01:19:50,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742228_1404 (size=103) 2024-12-12T01:19:50,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742228_1404 (size=103) 2024-12-12T01:19:50,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 
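Both SnapshotRegionCallable runs above first flush the region memstores to new HFiles (cf/a2a2f35... and cf/aeff758...) and only then add file references to the snapshot manifest. The same flush can be requested by hand; a minimal sketch of the equivalent explicit call, assuming the standard Admin.flush API, purely to illustrate what the FLUSH snapshot type does implicitly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Forces every region of the table to write its memstore out to a new HFile,
      // the step the snapshot procedure performs before creating references.
      admin.flush(TableName.valueOf("testExportExpiredSnapshot"));
    }
  }
}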
2024-12-12T01:19:50,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-12T01:19:50,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-12-12T01:19:50,562 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:50,563 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d644bbcb843c6264137c6748d170fd67 2024-12-12T01:19:50,565 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=185 2024-12-12T01:19:50,565 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d644bbcb843c6264137c6748d170fd67 in 208 msec 2024-12-12T01:19:50,565 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:19:50,565 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:19:50,566 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:19:50,566 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-12T01:19:50,566 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-12T01:19:50,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742230_1406 (size=609) 2024-12-12T01:19:50,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742230_1406 (size=609) 2024-12-12T01:19:50,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742230_1406 (size=609) 2024-12-12T01:19:50,579 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:19:50,583 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:19:50,584 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-12T01:19:50,585 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:19:50,585 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-12T01:19:50,586 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 241 msec 2024-12-12T01:19:50,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-12T01:19:50,665 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-12T01:19:50,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-12T01:19:50,711 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-12T01:19:50,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-12T01:19:50,712 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-12T01:19:50,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-12T01:19:50,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-12T01:19:51,353 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0007_000001 (auth:SIMPLE) from 127.0.0.1:44542 
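After the "Operation: SNAPSHOT ... completed" entry above, the snapshot is visible to clients. A minimal sketch, assuming the standard Admin.listSnapshots call, of how the registered snapshot (including the TTL the export tool later checks) can be confirmed:

import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Lists the snapshots taken for this test; each SnapshotDescription carries
      // the name and creation metadata that the TTL verification relies on.
      List<SnapshotDescription> snaps =
          admin.listSnapshots(Pattern.compile(".*testExportExpiredSnapshot.*"));
      for (SnapshotDescription s : snaps) {
        System.out.println(s.getName());
      }
    }
  }
}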
2024-12-12T01:19:51,363 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0007/container_1733966109028_0007_01_000001/launch_container.sh] 2024-12-12T01:19:51,363 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0007/container_1733966109028_0007_01_000001/container_tokens] 2024-12-12T01:19:51,363 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0007/container_1733966109028_0007_01_000001/sysfs] 2024-12-12T01:19:52,268 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:19:58,910 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:20:00,672 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966400672 2024-12-12T01:20:00,672 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33353, tgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966400672, rawTgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966400672, srcFsUri=hdfs://localhost:33353, srcDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:20:00,705 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33353, inputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:20:00,705 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966400672, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966400672/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-12T01:20:00,706 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-12T01:20:00,708 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. 
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
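The SnapshotTTLExpiredException above comes from ExportSnapshot's up-front verification step: the snapshot finished at about 01:19:50 with ttl=10 seconds, and the export only begins its checks at about 01:20:00, so the TTL has already elapsed before any data is copied. A minimal sketch, assuming the ToolRunner entry point visible in the stack trace and the tool's --snapshot/--copy-to options, of how such an export can be driven (the destination path is the one logged above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportExpiredSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Fails fast with SnapshotTTLExpiredException because the 10 s TTL has
    // already elapsed by the time verification runs; no files are copied.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snapshot-testExportExpiredSnapshot",
        "--copy-to",
        "hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966400672"
    });
    System.exit(rc);
  }
}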
2024-12-12T01:20:00,709 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-12T01:20:00,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T01:20:00,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-12T01:20:00,712 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966400712"}]},"ts":"1733966400712"} 2024-12-12T01:20:00,713 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-12T01:20:00,713 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-12T01:20:00,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-12T01:20:00,715 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b350edc44c3ca5c3a550d7a8c4d546a2, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e8af4be5fd37254877e2fab7e9514d84, UNASSIGN}] 2024-12-12T01:20:00,716 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e8af4be5fd37254877e2fab7e9514d84, UNASSIGN 2024-12-12T01:20:00,716 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b350edc44c3ca5c3a550d7a8c4d546a2, UNASSIGN 2024-12-12T01:20:00,717 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=e8af4be5fd37254877e2fab7e9514d84, regionState=CLOSING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:20:00,717 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=b350edc44c3ca5c3a550d7a8c4d546a2, regionState=CLOSING, regionLocation=36236b0d7605,44743,1733966101408 2024-12-12T01:20:00,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b350edc44c3ca5c3a550d7a8c4d546a2, UNASSIGN because future has completed 2024-12-12T01:20:00,719 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:20:00,719 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2, server=36236b0d7605,44743,1733966101408}] 2024-12-12T01:20:00,720 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e8af4be5fd37254877e2fab7e9514d84, UNASSIGN because future has completed 2024-12-12T01:20:00,720 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:20:00,720 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure e8af4be5fd37254877e2fab7e9514d84, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:20:00,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-12T01:20:00,872 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:20:00,872 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:20:00,872 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing b350edc44c3ca5c3a550d7a8c4d546a2, disabling compactions & flushes 2024-12-12T01:20:00,873 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 2024-12-12T01:20:00,873 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 2024-12-12T01:20:00,873 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. after waiting 0 ms 2024-12-12T01:20:00,873 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 
2024-12-12T01:20:00,873 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:20:00,873 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:20:00,873 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing e8af4be5fd37254877e2fab7e9514d84, disabling compactions & flushes 2024-12-12T01:20:00,873 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 2024-12-12T01:20:00,873 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 2024-12-12T01:20:00,873 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. after waiting 0 ms 2024-12-12T01:20:00,873 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 2024-12-12T01:20:00,893 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:20:00,893 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:20:00,894 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2. 
2024-12-12T01:20:00,894 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for b350edc44c3ca5c3a550d7a8c4d546a2: Waiting for close lock at 1733966400872Running coprocessor pre-close hooks at 1733966400872Disabling compacts and flushes for region at 1733966400872Disabling writes for close at 1733966400873 (+1 ms)Writing region close event to WAL at 1733966400874 (+1 ms)Running coprocessor post-close hooks at 1733966400893 (+19 ms)Closed at 1733966400893 2024-12-12T01:20:00,897 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:20:00,897 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=b350edc44c3ca5c3a550d7a8c4d546a2, regionState=CLOSED 2024-12-12T01:20:00,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2, server=36236b0d7605,44743,1733966101408 because future has completed 2024-12-12T01:20:00,902 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=190 2024-12-12T01:20:00,902 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure b350edc44c3ca5c3a550d7a8c4d546a2, server=36236b0d7605,44743,1733966101408 in 181 msec 2024-12-12T01:20:00,904 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b350edc44c3ca5c3a550d7a8c4d546a2, UNASSIGN in 187 msec 2024-12-12T01:20:00,910 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:20:00,910 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:20:00,910 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84. 
2024-12-12T01:20:00,910 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for e8af4be5fd37254877e2fab7e9514d84: Waiting for close lock at 1733966400873Running coprocessor pre-close hooks at 1733966400873Disabling compacts and flushes for region at 1733966400873Disabling writes for close at 1733966400873Writing region close event to WAL at 1733966400881 (+8 ms)Running coprocessor post-close hooks at 1733966400910 (+29 ms)Closed at 1733966400910 2024-12-12T01:20:00,912 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:20:00,913 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=e8af4be5fd37254877e2fab7e9514d84, regionState=CLOSED 2024-12-12T01:20:00,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure e8af4be5fd37254877e2fab7e9514d84, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:20:00,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=191 2024-12-12T01:20:00,919 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure e8af4be5fd37254877e2fab7e9514d84, server=36236b0d7605,33883,1733966101479 in 196 msec 2024-12-12T01:20:00,920 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=189 2024-12-12T01:20:00,920 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=e8af4be5fd37254877e2fab7e9514d84, UNASSIGN in 203 msec 2024-12-12T01:20:00,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-12T01:20:00,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 207 msec 2024-12-12T01:20:00,925 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966400925"}]},"ts":"1733966400925"} 2024-12-12T01:20:00,927 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-12T01:20:00,927 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-12T01:20:00,929 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 219 msec 2024-12-12T01:20:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-12T01:20:01,029 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-12T01:20:01,029 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,032 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,033 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,037 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,044 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:20:01,044 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:20:01,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,047 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/recovered.edits] 2024-12-12T01:20:01,048 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/recovered.edits] 2024-12-12T01:20:01,049 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-12T01:20:01,049 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-12T01:20:01,049 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-12T01:20:01,049 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-12T01:20:01,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:01,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:01,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-12T01:20:01,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:01,053 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot 
with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:01,053 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:01,055 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:01,055 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/cf/5a18e617269142f1b9be9502922a53f8 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/cf/5a18e617269142f1b9be9502922a53f8 2024-12-12T01:20:01,055 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:01,055 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/cf/d8d4ed82281442768515efde67fff68b to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/cf/d8d4ed82281442768515efde67fff68b 2024-12-12T01:20:01,059 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2/recovered.edits/9.seqid 2024-12-12T01:20:01,059 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84/recovered.edits/9.seqid 2024-12-12T01:20:01,060 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/b350edc44c3ca5c3a550d7a8c4d546a2 2024-12-12T01:20:01,060 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportExpiredSnapshot/e8af4be5fd37254877e2fab7e9514d84 2024-12-12T01:20:01,060 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-12T01:20:01,062 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,065 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-12T01:20:01,067 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-12T01:20:01,069 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,069 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-12T01:20:01,069 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966401069"}]},"ts":"9223372036854775807"} 2024-12-12T01:20:01,069 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966401069"}]},"ts":"9223372036854775807"} 2024-12-12T01:20:01,072 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-12T01:20:01,072 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b350edc44c3ca5c3a550d7a8c4d546a2, NAME => 'testtb-testExportExpiredSnapshot,,1733966387511.b350edc44c3ca5c3a550d7a8c4d546a2.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e8af4be5fd37254877e2fab7e9514d84, NAME => 'testtb-testExportExpiredSnapshot,1,1733966387511.e8af4be5fd37254877e2fab7e9514d84.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T01:20:01,073 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
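[Editor's note: for orientation only, a minimal client-side sketch of the cleanup this log trail records (drop the expired-snapshot table, then delete its snapshots, as in the DeleteTableProcedure pid=194 and the "delete name: ..." entries below). This is an illustrative sketch assuming the standard HBase 2.x Admin API; the class name, configuration source, and error handling are assumptions, not the test's actual code.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropExpiredSnapshotTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Snapshot names as they appear in the MasterRpcServices "delete" entries below.
      admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
      admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
      admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");

      // A DeleteTableProcedure (pid=194 in this log) requires the table to be disabled first.
      TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);
        }
        admin.deleteTable(tn);
      }
    }
  }
}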
2024-12-12T01:20:01,073 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966401073"}]},"ts":"9223372036854775807"} 2024-12-12T01:20:01,075 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-12T01:20:01,077 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,078 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 47 msec 2024-12-12T01:20:01,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-12T01:20:01,166 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-12-12T01:20:01,166 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-12T01:20:01,175 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-12T01:20:01,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-12T01:20:01,178 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-12-12T01:20:01,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-12T01:20:01,181 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-12T01:20:01,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-12T01:20:01,211 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=798 (was 810), OpenFileDescriptor=767 (was 793), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=600 (was 716), ProcessCount=14 (was 14), AvailableMemoryMB=6188 (was 5992) - AvailableMemoryMB LEAK? 
- 2024-12-12T01:20:01,211 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-12T01:20:01,237 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=798, OpenFileDescriptor=767, MaxFileDescriptor=1048576, SystemLoadAverage=600, ProcessCount=14, AvailableMemoryMB=6183 2024-12-12T01:20:01,237 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-12T01:20:01,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:20:01,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T01:20:01,243 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:20:01,243 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:20:01,243 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-12-12T01:20:01,244 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:20:01,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-12T01:20:01,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742231_1407 (size=412) 2024-12-12T01:20:01,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742231_1407 (size=412) 2024-12-12T01:20:01,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742231_1407 (size=412) 2024-12-12T01:20:01,271 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6897d1cee65e3089019400addb12c5a2, NAME => 'testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:20:01,282 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ec3c0892b19bfc98e76c3f97d3d9e8e2, NAME => 'testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:20:01,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742232_1408 (size=73) 2024-12-12T01:20:01,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742232_1408 (size=73) 2024-12-12T01:20:01,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742232_1408 (size=73) 2024-12-12T01:20:01,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742233_1409 (size=73) 2024-12-12T01:20:01,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742233_1409 (size=73) 2024-12-12T01:20:01,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742233_1409 (size=73) 2024-12-12T01:20:01,312 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:20:01,312 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing ec3c0892b19bfc98e76c3f97d3d9e8e2, disabling compactions & flushes 2024-12-12T01:20:01,312 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 2024-12-12T01:20:01,312 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 2024-12-12T01:20:01,312 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 
after waiting 0 ms 2024-12-12T01:20:01,312 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 2024-12-12T01:20:01,312 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 2024-12-12T01:20:01,312 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for ec3c0892b19bfc98e76c3f97d3d9e8e2: Waiting for close lock at 1733966401312Disabling compacts and flushes for region at 1733966401312Disabling writes for close at 1733966401312Writing region close event to WAL at 1733966401312Closed at 1733966401312 2024-12-12T01:20:01,352 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=920.55 KB, freeSize=879.10 MB, max=880 MB, blockCount=5, accesses=7, hits=2, hitRatio=28.57%, , cachingAccesses=7, cachingHits=2, cachingHitsRatio=28.57%, evictions=29, evicted=0, evictedPerRun=0.0 2024-12-12T01:20:01,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-12T01:20:01,436 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-12T01:20:01,500 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-12T01:20:01,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-12T01:20:01,609 DEBUG [master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=18, reuseRatio=64.29% 2024-12-12T01:20:01,610 DEBUG [master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-12T01:20:01,705 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:20:01,705 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 6897d1cee65e3089019400addb12c5a2, disabling compactions & flushes 2024-12-12T01:20:01,705 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 
2024-12-12T01:20:01,705 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 2024-12-12T01:20:01,705 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. after waiting 0 ms 2024-12-12T01:20:01,705 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 2024-12-12T01:20:01,705 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 2024-12-12T01:20:01,705 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6897d1cee65e3089019400addb12c5a2: Waiting for close lock at 1733966401705Disabling compacts and flushes for region at 1733966401705Disabling writes for close at 1733966401705Writing region close event to WAL at 1733966401705Closed at 1733966401705 2024-12-12T01:20:01,707 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:20:01,707 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733966401707"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966401707"}]},"ts":"1733966401707"} 2024-12-12T01:20:01,707 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733966401707"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966401707"}]},"ts":"1733966401707"} 2024-12-12T01:20:01,710 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-12T01:20:01,711 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:20:01,711 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966401711"}]},"ts":"1733966401711"} 2024-12-12T01:20:01,713 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-12T01:20:01,713 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:20:01,714 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:20:01,714 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:20:01,714 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:20:01,714 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:20:01,714 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:20:01,714 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:20:01,714 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:20:01,714 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:20:01,715 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:20:01,715 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:20:01,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6897d1cee65e3089019400addb12c5a2, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=ec3c0892b19bfc98e76c3f97d3d9e8e2, ASSIGN}] 2024-12-12T01:20:01,716 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=ec3c0892b19bfc98e76c3f97d3d9e8e2, ASSIGN 2024-12-12T01:20:01,716 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6897d1cee65e3089019400addb12c5a2, ASSIGN 2024-12-12T01:20:01,717 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=ec3c0892b19bfc98e76c3f97d3d9e8e2, ASSIGN; state=OFFLINE, location=36236b0d7605,33883,1733966101479; forceNewPlan=false, retain=false 2024-12-12T01:20:01,717 INFO [PEWorker-2 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6897d1cee65e3089019400addb12c5a2, ASSIGN; state=OFFLINE, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:20:01,867 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T01:20:01,868 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=6897d1cee65e3089019400addb12c5a2, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:20:01,868 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=ec3c0892b19bfc98e76c3f97d3d9e8e2, regionState=OPENING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:20:01,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=ec3c0892b19bfc98e76c3f97d3d9e8e2, ASSIGN because future has completed 2024-12-12T01:20:01,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:20:01,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6897d1cee65e3089019400addb12c5a2, ASSIGN because future has completed 2024-12-12T01:20:01,871 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6897d1cee65e3089019400addb12c5a2, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:20:01,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-12T01:20:02,026 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 2024-12-12T01:20:02,026 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => ec3c0892b19bfc98e76c3f97d3d9e8e2, NAME => 'testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T01:20:02,027 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. service=AccessControlService 2024-12-12T01:20:02,027 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:20:02,027 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,027 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:20:02,027 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,027 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,029 INFO [StoreOpener-ec3c0892b19bfc98e76c3f97d3d9e8e2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,030 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 2024-12-12T01:20:02,030 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => 6897d1cee65e3089019400addb12c5a2, NAME => 'testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T01:20:02,030 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. service=AccessControlService 2024-12-12T01:20:02,030 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:20:02,031 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,031 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:20:02,031 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,031 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,032 INFO [StoreOpener-ec3c0892b19bfc98e76c3f97d3d9e8e2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec3c0892b19bfc98e76c3f97d3d9e8e2 columnFamilyName cf 2024-12-12T01:20:02,032 DEBUG [StoreOpener-ec3c0892b19bfc98e76c3f97d3d9e8e2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:20:02,032 INFO [StoreOpener-ec3c0892b19bfc98e76c3f97d3d9e8e2-1 {}] regionserver.HStore(327): Store=ec3c0892b19bfc98e76c3f97d3d9e8e2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:20:02,033 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,033 INFO [StoreOpener-6897d1cee65e3089019400addb12c5a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,033 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,034 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,034 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,034 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,035 INFO [StoreOpener-6897d1cee65e3089019400addb12c5a2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6897d1cee65e3089019400addb12c5a2 columnFamilyName cf 2024-12-12T01:20:02,035 DEBUG [StoreOpener-6897d1cee65e3089019400addb12c5a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:20:02,035 INFO [StoreOpener-6897d1cee65e3089019400addb12c5a2-1 {}] regionserver.HStore(327): Store=6897d1cee65e3089019400addb12c5a2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:20:02,035 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,036 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,036 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,037 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,037 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,037 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,039 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:20:02,039 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened ec3c0892b19bfc98e76c3f97d3d9e8e2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67576378, jitterRate=0.006966501474380493}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:20:02,039 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,040 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,040 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for ec3c0892b19bfc98e76c3f97d3d9e8e2: Running coprocessor pre-open hook at 1733966402027Writing region info on filesystem at 1733966402028 (+1 ms)Initializing all the Stores at 1733966402028Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966402029 (+1 ms)Cleaning up temporary data from old regions at 1733966402034 (+5 ms)Running coprocessor post-open hooks at 1733966402039 (+5 ms)Region opened successfully at 1733966402040 (+1 ms) 2024-12-12T01:20:02,042 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2., pid=198, masterSystemTime=1733966402022 2024-12-12T01:20:02,043 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:20:02,044 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened 6897d1cee65e3089019400addb12c5a2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67346168, jitterRate=0.0035361051559448242}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:20:02,044 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,044 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for 6897d1cee65e3089019400addb12c5a2: Running coprocessor pre-open hook at 
1733966402031Writing region info on filesystem at 1733966402031Initializing all the Stores at 1733966402032 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966402032Cleaning up temporary data from old regions at 1733966402037 (+5 ms)Running coprocessor post-open hooks at 1733966402044 (+7 ms)Region opened successfully at 1733966402044 2024-12-12T01:20:02,051 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2., pid=199, masterSystemTime=1733966402024 2024-12-12T01:20:02,052 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 2024-12-12T01:20:02,052 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 2024-12-12T01:20:02,053 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=ec3c0892b19bfc98e76c3f97d3d9e8e2, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:20:02,054 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 2024-12-12T01:20:02,054 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 
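[Editor's note: a hedged sketch of the table creation that CreateTableProcedure pid=195 executes in the entries above: one 'cf' family with VERSIONS => '1', REGION_REPLICATION => '1', and a single split point at '1', which yields the two regions (STARTKEY '' to '1' and '1' to '') opened here. The class name and setup are assumptions; only the table, family, and split key come from the log.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateEmptyExportTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)   // VERSIONS => '1' in the descriptor logged above
              .build())
          .build();
      // One split key => two regions, matching the assignment entries in this log.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      // Blocks until the CreateTableProcedure and region assignment complete.
      admin.createTable(desc, splitKeys);
    }
  }
}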
2024-12-12T01:20:02,055 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=6897d1cee65e3089019400addb12c5a2, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:20:02,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:20:02,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6897d1cee65e3089019400addb12c5a2, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:20:02,062 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=197 2024-12-12T01:20:02,062 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2, server=36236b0d7605,33883,1733966101479 in 189 msec 2024-12-12T01:20:02,063 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=196 2024-12-12T01:20:02,066 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure 6897d1cee65e3089019400addb12c5a2, server=36236b0d7605,38295,1733966101221 in 189 msec 2024-12-12T01:20:02,066 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=ec3c0892b19bfc98e76c3f97d3d9e8e2, ASSIGN in 347 msec 2024-12-12T01:20:02,067 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=195 2024-12-12T01:20:02,067 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6897d1cee65e3089019400addb12c5a2, ASSIGN in 348 msec 2024-12-12T01:20:02,073 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:20:02,074 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966402074"}]},"ts":"1733966402074"} 2024-12-12T01:20:02,076 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-12T01:20:02,077 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:20:02,077 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-12T01:20:02,081 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-12T01:20:02,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:02,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:02,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:02,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:02,087 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:02,087 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:02,087 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:02,088 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:02,088 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:02,088 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:02,089 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:02,089 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:02,090 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 847 msec 2024-12-12T01:20:02,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-12T01:20:02,385 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-12T01:20:02,385 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-12-12T01:20:02,385 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:20:02,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44743 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32778 bytes) of info 2024-12-12T01:20:02,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-12T01:20:02,393 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:20:02,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-12T01:20:02,393 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-12T01:20:02,395 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T01:20:02,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966402396 (current time:1733966402396). 
2024-12-12T01:20:02,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:20:02,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-12T01:20:02,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:20:02,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41fde0a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:02,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:20:02,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:20:02,397 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:20:02,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:20:02,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:20:02,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10d3137d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:02,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:20:02,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:20:02,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:02,398 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58610, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:20:02,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57334495, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:02,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:20:02,400 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:20:02,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:20:02,401 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45236, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:20:02,402 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:20:02,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:20:02,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:02,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:02,402 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:20:02,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e36597c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:02,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:20:02,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:20:02,404 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:20:02,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:20:02,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:20:02,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@89c91ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:02,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:20:02,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:20:02,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:02,405 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58618, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:20:02,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36564eb2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:02,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:20:02,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:20:02,407 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:20:02,407 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45246, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-12T01:20:02,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:20:02,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:20:02,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:20:02,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:02,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:02,410 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:20:02,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-12T01:20:02,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:20:02,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T01:20:02,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-12T01:20:02,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-12T01:20:02,413 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:20:02,414 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:20:02,416 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:20:02,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742234_1410 (size=185) 2024-12-12T01:20:02,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742234_1410 (size=185) 2024-12-12T01:20:02,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742234_1410 (size=185) 2024-12-12T01:20:02,427 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:20:02,427 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6897d1cee65e3089019400addb12c5a2}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2}] 
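The entries above trace the master side of a snapshot request: MasterRpcServices.snapshot validates the request, reads the table ACL, and stores SnapshotProcedure pid=200 (type=FLUSH) together with its per-region subprocedures. As a hedged illustration only (this code is not part of the test log; the snapshot and table names are copied from the entries above, everything else is assumed client boilerplate), the client-side call that triggers such a request typically looks like the following sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative only; assumes default client configuration for the test cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // This call reaches MasterRpcServices.snapshot on the master, which registers
      // a FLUSH-type SnapshotProcedure like pid=200 in the log above.
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"));
    }
  }
}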
2024-12-12T01:20:02,428 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,428 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-12T01:20:02,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-12-12T01:20:02,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-12-12T01:20:02,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 2024-12-12T01:20:02,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 2024-12-12T01:20:02,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for ec3c0892b19bfc98e76c3f97d3d9e8e2: 2024-12-12T01:20:02,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for 6897d1cee65e3089019400addb12c5a2: 2024-12-12T01:20:02,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-12T01:20:02,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-12T01:20:02,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:02,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:02,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:20:02,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:20:02,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:20:02,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:20:02,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742235_1411 (size=76) 2024-12-12T01:20:02,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742235_1411 (size=76) 2024-12-12T01:20:02,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742235_1411 (size=76) 2024-12-12T01:20:02,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742236_1412 (size=76) 2024-12-12T01:20:02,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 2024-12-12T01:20:02,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742236_1412 (size=76) 2024-12-12T01:20:02,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-12-12T01:20:02,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742236_1412 (size=76) 2024-12-12T01:20:02,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 
2024-12-12T01:20:02,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-12-12T01:20:02,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-12-12T01:20:02,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-12-12T01:20:02,590 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,590 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,590 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,590 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,593 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2 in 164 msec 2024-12-12T01:20:02,594 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=201, resume processing ppid=200 2024-12-12T01:20:02,594 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6897d1cee65e3089019400addb12c5a2 in 164 msec 2024-12-12T01:20:02,594 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:20:02,595 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:20:02,595 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:20:02,596 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:02,596 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:02,609 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742237_1413 (size=567) 2024-12-12T01:20:02,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742237_1413 (size=567) 2024-12-12T01:20:02,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742237_1413 (size=567) 2024-12-12T01:20:02,612 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:20:02,618 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:20:02,619 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:02,620 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:20:02,620 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-12T01:20:02,622 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 210 msec 2024-12-12T01:20:02,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-12T01:20:02,735 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-12T01:20:02,739 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='0591e4e58ec3eb1c9cc58a9f57a4462be', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:20:02,740 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', 
row='17f365f2f9cba0afffe07fd84d48ce97e', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:20:02,741 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='3c5a873aacef36492bd71265913ec3d6f', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:20:02,742 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='2923c837d57a9be142341d52f64b1f239', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:20:02,742 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='430ef45f493b302300850a86013a2628c', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:20:02,749 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:20:02,752 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33883 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:20:02,753 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-12T01:20:02,756 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-12T01:20:02,756 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 
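The region-location lookups and the two "writing data to region ... with WAL disabled" entries above come from the test loading rows into the table before the second snapshot is taken. A hedged, illustrative sketch of such a client write follows (the row key, column family cf and qualifier q are taken from the log; the value and connection setup are assumptions).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative only; value and configuration are assumed, not taken from the log.
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(
             TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("0591e4e58ec3eb1c9cc58a9f57a4462be"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL is what produces the "with WAL disabled. Data may be lost in the
      // event of a crash." warning logged by HRegion above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}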
2024-12-12T01:20:02,756 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:20:02,758 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-12T01:20:02,763 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-12T01:20:02,768 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-12T01:20:02,770 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T01:20:02,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966402770 (current time:1733966402770). 2024-12-12T01:20:02,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:20:02,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-12T01:20:02,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:20:02,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d55ed50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:02,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:20:02,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:20:02,772 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:20:02,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:20:02,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:20:02,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35cd2e06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:02,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:20:02,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:20:02,773 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:02,774 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33394, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:20:02,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6469e0d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:02,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:20:02,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:20:02,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:20:02,777 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35554, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:20:02,778 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 
2024-12-12T01:20:02,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:20:02,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:02,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:02,778 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:20:02,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2646669b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:02,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:20:02,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:20:02,779 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:20:02,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:20:02,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:20:02,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78abb32d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:02,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:20:02,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:20:02,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:02,781 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33416, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:20:02,781 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79004ee2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:02,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:20:02,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:20:02,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:20:02,783 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35568, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:20:02,785 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:20:02,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 
2024-12-12T01:20:02,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:20:02,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:02,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:02,786 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:20:02,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-12T01:20:02,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-12T01:20:02,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-12T01:20:02,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-12T01:20:02,789 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:20:02,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-12T01:20:02,790 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:20:02,792 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:20:02,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742238_1414 (size=180) 2024-12-12T01:20:02,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742238_1414 (size=180) 2024-12-12T01:20:02,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742238_1414 (size=180) 2024-12-12T01:20:02,798 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:20:02,798 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6897d1cee65e3089019400addb12c5a2}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2}] 2024-12-12T01:20:02,799 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:02,799 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:02,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-12T01:20:02,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-12T01:20:02,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-12T01:20:02,951 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 2024-12-12T01:20:02,951 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 2024-12-12T01:20:02,951 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing 6897d1cee65e3089019400addb12c5a2 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-12T01:20:02,951 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing ec3c0892b19bfc98e76c3f97d3d9e8e2 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-12T01:20:02,971 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/.tmp/cf/ba50bb0d3d164cd38543c4ca83e221c7 is 71, key is 1721decbe06370b81d1a3c4c80e88702/cf:q/1733966402752/Put/seqid=0 2024-12-12T01:20:02,977 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/.tmp/cf/ffb99fcb80db43f9a982bcd5113ea4d9 is 69, key is 0591e4e58ec3eb1c9cc58a9f57a4462be/cf:q/1733966402749/Put/seqid=0 2024-12-12T01:20:02,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742239_1415 (size=8460) 2024-12-12T01:20:02,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742239_1415 (size=8460) 2024-12-12T01:20:02,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742239_1415 (size=8460) 2024-12-12T01:20:02,990 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/.tmp/cf/ba50bb0d3d164cd38543c4ca83e221c7 2024-12-12T01:20:02,996 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/.tmp/cf/ba50bb0d3d164cd38543c4ca83e221c7 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/cf/ba50bb0d3d164cd38543c4ca83e221c7 2024-12-12T01:20:02,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742240_1416 (size=5149) 2024-12-12T01:20:02,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742240_1416 (size=5149) 2024-12-12T01:20:02,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742240_1416 (size=5149) 2024-12-12T01:20:02,999 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/.tmp/cf/ffb99fcb80db43f9a982bcd5113ea4d9 2024-12-12T01:20:03,003 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/cf/ba50bb0d3d164cd38543c4ca83e221c7, entries=49, sequenceid=6, filesize=8.3 K 2024-12-12T01:20:03,004 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for ec3c0892b19bfc98e76c3f97d3d9e8e2 in 53ms, sequenceid=6, compaction requested=false 2024-12-12T01:20:03,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-12T01:20:03,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for ec3c0892b19bfc98e76c3f97d3d9e8e2: 2024-12-12T01:20:03,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. for snaptb0-testEmptyExportFileSystemState completed. 
2024-12-12T01:20:03,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:03,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:20:03,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/cf/ba50bb0d3d164cd38543c4ca83e221c7] hfiles 2024-12-12T01:20:03,005 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/cf/ba50bb0d3d164cd38543c4ca83e221c7 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:03,010 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/.tmp/cf/ffb99fcb80db43f9a982bcd5113ea4d9 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/cf/ffb99fcb80db43f9a982bcd5113ea4d9 2024-12-12T01:20:03,016 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/cf/ffb99fcb80db43f9a982bcd5113ea4d9, entries=1, sequenceid=6, filesize=5.0 K 2024-12-12T01:20:03,017 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 6897d1cee65e3089019400addb12c5a2 in 66ms, sequenceid=6, compaction requested=false 2024-12-12T01:20:03,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for 6897d1cee65e3089019400addb12c5a2: 2024-12-12T01:20:03,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-12T01:20:03,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:03,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:20:03,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/cf/ffb99fcb80db43f9a982bcd5113ea4d9] hfiles 2024-12-12T01:20:03,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/cf/ffb99fcb80db43f9a982bcd5113ea4d9 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:03,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742241_1417 (size=115) 2024-12-12T01:20:03,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742241_1417 (size=115) 2024-12-12T01:20:03,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742241_1417 (size=115) 2024-12-12T01:20:03,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 
2024-12-12T01:20:03,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-12T01:20:03,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-12-12T01:20:03,025 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:03,025 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:03,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2 in 228 msec 2024-12-12T01:20:03,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742242_1418 (size=115) 2024-12-12T01:20:03,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742242_1418 (size=115) 2024-12-12T01:20:03,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742242_1418 (size=115) 2024-12-12T01:20:03,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 
2024-12-12T01:20:03,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-12T01:20:03,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-12-12T01:20:03,032 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:03,032 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:03,035 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=204, resume processing ppid=203 2024-12-12T01:20:03,035 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:20:03,035 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6897d1cee65e3089019400addb12c5a2 in 235 msec 2024-12-12T01:20:03,036 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:20:03,036 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:20:03,036 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:03,037 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:03,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742243_1419 (size=645) 2024-12-12T01:20:03,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742243_1419 (size=645) 2024-12-12T01:20:03,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742243_1419 (size=645) 2024-12-12T01:20:03,057 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:20:03,063 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:20:03,063 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:03,065 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:20:03,065 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-12T01:20:03,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 278 msec 2024-12-12T01:20:03,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-12T01:20:03,105 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-12T01:20:03,105 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966403105 2024-12-12T01:20:03,105 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33353, tgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966403105, rawTgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966403105, srcFsUri=hdfs://localhost:33353, srcDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:20:03,151 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33353, inputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:20:03,151 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966403105, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966403105/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:03,154 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-12T01:20:03,158 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966403105/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:03,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742244_1420 (size=185) 2024-12-12T01:20:03,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742244_1420 (size=185) 2024-12-12T01:20:03,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742244_1420 (size=185) 2024-12-12T01:20:03,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742245_1421 (size=567) 2024-12-12T01:20:03,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742245_1421 (size=567) 2024-12-12T01:20:03,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742245_1421 (size=567) 2024-12-12T01:20:03,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:03,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:03,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:03,478 INFO [regionserver/36236b0d7605:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-12T01:20:03,478 INFO [regionserver/36236b0d7605:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-12T01:20:03,501 INFO [regionserver/36236b0d7605:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-12T01:20:04,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-15798211180780312219.jar 
2024-12-12T01:20:04,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:04,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:04,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-11030601776414030696.jar 2024-12-12T01:20:04,454 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:04,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:04,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:04,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:04,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:04,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:04,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T01:20:04,456 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T01:20:04,457 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T01:20:04,457 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T01:20:04,457 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T01:20:04,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T01:20:04,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T01:20:04,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T01:20:04,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T01:20:04,459 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T01:20:04,459 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T01:20:04,460 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:20:04,460 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:20:04,460 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:20:04,460 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:20:04,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:20:04,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:20:04,461 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:20:04,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742246_1422 (size=131440) 2024-12-12T01:20:04,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742246_1422 (size=131440) 2024-12-12T01:20:04,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742246_1422 (size=131440) 2024-12-12T01:20:04,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742247_1423 (size=4188619) 2024-12-12T01:20:04,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742247_1423 (size=4188619) 2024-12-12T01:20:04,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742247_1423 (size=4188619) 2024-12-12T01:20:04,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742248_1424 (size=1323991) 2024-12-12T01:20:04,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742248_1424 (size=1323991) 2024-12-12T01:20:04,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742248_1424 (size=1323991) 2024-12-12T01:20:04,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742249_1425 (size=903541) 2024-12-12T01:20:04,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742249_1425 (size=903541) 2024-12-12T01:20:04,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742249_1425 (size=903541) 2024-12-12T01:20:04,735 DEBUG [master/36236b0d7605:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-12T01:20:04,738 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 
d644bbcb843c6264137c6748d170fd67 changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:20:04,739 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ec3c0892b19bfc98e76c3f97d3d9e8e2 changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:20:04,740 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e4e1b654a5a995b6e71de7ab606443fe changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:20:04,740 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 6897d1cee65e3089019400addb12c5a2 changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:20:04,741 DEBUG [master/36236b0d7605:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-12-12T01:20:04,741 INFO [master/36236b0d7605:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-12T01:20:04,741 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 2024-12-12T01:20:04,742 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:20:04,743 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 2 regions 2024-12-12T01:20:04,743 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 2 regions 2024-12-12T01:20:04,743 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 2 regions 2024-12-12T01:20:04,743 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:20:04,743 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:20:04,743 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:20:04,743 INFO [master/36236b0d7605:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:20:04,743 INFO [master/36236b0d7605:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:20:04,743 INFO [master/36236b0d7605:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:20:04,743 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-12-12T01:20:04,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742250_1426 (size=8360360) 2024-12-12T01:20:04,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742250_1426 (size=8360360) 2024-12-12T01:20:04,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742250_1426 (size=8360360) 2024-12-12T01:20:04,756 INFO [master/36236b0d7605:0.Chore.1 {}] balancer.StochasticLoadBalancer(395): Cluster wide - skipping load balancing because weighted average imbalance=0.01922020867655135 <= threshold(0.025). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 0.025 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.33333333333333337, need balance); 2024-12-12T01:20:04,757 DEBUG [master/36236b0d7605:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-12T01:20:04,764 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-12T01:20:04,764 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testEmptyExportFileSystemState because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-12T01:20:04,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742251_1427 (size=1877034) 2024-12-12T01:20:04,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742251_1427 (size=1877034) 2024-12-12T01:20:04,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742251_1427 (size=1877034) 2024-12-12T01:20:04,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742252_1428 (size=77835) 2024-12-12T01:20:04,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742252_1428 (size=77835) 2024-12-12T01:20:04,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742252_1428 (size=77835) 2024-12-12T01:20:04,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742253_1429 (size=30949) 2024-12-12T01:20:04,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742253_1429 (size=30949) 2024-12-12T01:20:04,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742253_1429 (size=30949) 2024-12-12T01:20:05,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742254_1430 (size=1597213) 2024-12-12T01:20:05,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742254_1430 (size=1597213) 2024-12-12T01:20:05,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39595 is added to blk_1073742254_1430 (size=1597213) 2024-12-12T01:20:05,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742255_1431 (size=443172) 2024-12-12T01:20:05,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742255_1431 (size=443172) 2024-12-12T01:20:05,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742255_1431 (size=443172) 2024-12-12T01:20:05,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742256_1432 (size=4695811) 2024-12-12T01:20:05,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742256_1432 (size=4695811) 2024-12-12T01:20:05,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742256_1432 (size=4695811) 2024-12-12T01:20:05,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742257_1433 (size=6426224) 2024-12-12T01:20:05,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742257_1433 (size=6426224) 2024-12-12T01:20:05,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742257_1433 (size=6426224) 2024-12-12T01:20:05,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742258_1434 (size=232957) 2024-12-12T01:20:05,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742258_1434 (size=232957) 2024-12-12T01:20:05,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742258_1434 (size=232957) 2024-12-12T01:20:05,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742259_1435 (size=127628) 2024-12-12T01:20:05,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742259_1435 (size=127628) 2024-12-12T01:20:05,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742259_1435 (size=127628) 2024-12-12T01:20:05,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742260_1436 (size=20406) 2024-12-12T01:20:05,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742260_1436 (size=20406) 2024-12-12T01:20:05,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742260_1436 (size=20406) 2024-12-12T01:20:05,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742261_1437 (size=5175431) 2024-12-12T01:20:05,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39595 is added to blk_1073742261_1437 (size=5175431) 2024-12-12T01:20:05,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742261_1437 (size=5175431) 2024-12-12T01:20:05,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742262_1438 (size=217634) 2024-12-12T01:20:05,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742262_1438 (size=217634) 2024-12-12T01:20:05,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742262_1438 (size=217634) 2024-12-12T01:20:05,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742263_1439 (size=1832290) 2024-12-12T01:20:05,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742263_1439 (size=1832290) 2024-12-12T01:20:05,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742263_1439 (size=1832290) 2024-12-12T01:20:05,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742264_1440 (size=322274) 2024-12-12T01:20:05,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742264_1440 (size=322274) 2024-12-12T01:20:05,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742264_1440 (size=322274) 2024-12-12T01:20:05,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742265_1441 (size=503880) 2024-12-12T01:20:05,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742265_1441 (size=503880) 2024-12-12T01:20:05,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742265_1441 (size=503880) 2024-12-12T01:20:05,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742266_1442 (size=29229) 2024-12-12T01:20:05,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742266_1442 (size=29229) 2024-12-12T01:20:05,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742266_1442 (size=29229) 2024-12-12T01:20:05,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742267_1443 (size=24096) 2024-12-12T01:20:05,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742267_1443 (size=24096) 2024-12-12T01:20:05,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742267_1443 (size=24096) 2024-12-12T01:20:05,442 INFO [regionserver/36236b0d7605:0.Chore.1 {}] 
regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/info has an old edit so flush to free WALs after random delay 62408 ms 2024-12-12T01:20:05,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742268_1444 (size=111872) 2024-12-12T01:20:05,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742268_1444 (size=111872) 2024-12-12T01:20:05,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742268_1444 (size=111872) 2024-12-12T01:20:05,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742269_1445 (size=45609) 2024-12-12T01:20:05,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742269_1445 (size=45609) 2024-12-12T01:20:05,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742269_1445 (size=45609) 2024-12-12T01:20:05,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742270_1446 (size=136454) 2024-12-12T01:20:05,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742270_1446 (size=136454) 2024-12-12T01:20:05,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742270_1446 (size=136454) 2024-12-12T01:20:05,840 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-12T01:20:05,842 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-12T01:20:05,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742271_1447 (size=7) 2024-12-12T01:20:05,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742271_1447 (size=7) 2024-12-12T01:20:05,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742271_1447 (size=7) 2024-12-12T01:20:05,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742272_1448 (size=10) 2024-12-12T01:20:05,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742272_1448 (size=10) 2024-12-12T01:20:05,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742272_1448 (size=10) 2024-12-12T01:20:05,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742273_1449 (size=303907) 2024-12-12T01:20:05,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742273_1449 (size=303907) 2024-12-12T01:20:05,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742273_1449 (size=303907) 2024-12-12T01:20:05,892 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:20:05,892 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T01:20:06,227 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0008_000001 (auth:SIMPLE) from 127.0.0.1:50738 2024-12-12T01:20:06,416 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:20:10,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-12T01:20:10,712 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-12T01:20:10,713 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-12T01:20:11,499 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0008_000001 (auth:SIMPLE) from 127.0.0.1:57920 2024-12-12T01:20:11,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742274_1450 (size=349581) 2024-12-12T01:20:11,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742274_1450 (size=349581) 2024-12-12T01:20:11,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742274_1450 (size=349581) 2024-12-12T01:20:12,416 INFO [regionserver/36236b0d7605:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 
because a76413eb65b7bad8091ed755bedda4fd/l has an old edit so flush to free WALs after random delay 258404 ms 2024-12-12T01:20:12,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742275_1451 (size=8568) 2024-12-12T01:20:12,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742275_1451 (size=8568) 2024-12-12T01:20:12,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742275_1451 (size=8568) 2024-12-12T01:20:12,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742276_1452 (size=460) 2024-12-12T01:20:12,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742276_1452 (size=460) 2024-12-12T01:20:12,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742276_1452 (size=460) 2024-12-12T01:20:12,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742277_1453 (size=8568) 2024-12-12T01:20:12,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742277_1453 (size=8568) 2024-12-12T01:20:12,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742277_1453 (size=8568) 2024-12-12T01:20:12,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742278_1454 (size=349581) 2024-12-12T01:20:12,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742278_1454 (size=349581) 2024-12-12T01:20:12,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742278_1454 (size=349581) 2024-12-12T01:20:14,003 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-12T01:20:14,004 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-12T01:20:14,009 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:14,010 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-12T01:20:14,010 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-12T01:20:14,010 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:14,011 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-12T01:20:14,011 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-12T01:20:14,011 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966403105/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966403105/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:14,011 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966403105/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-12T01:20:14,011 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966403105/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-12T01:20:14,024 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-12T01:20:14,028 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966414028"}]},"ts":"1733966414028"} 2024-12-12T01:20:14,030 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-12T01:20:14,030 INFO [PEWorker-3 {}] 
procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-12T01:20:14,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-12T01:20:14,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6897d1cee65e3089019400addb12c5a2, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=ec3c0892b19bfc98e76c3f97d3d9e8e2, UNASSIGN}] 2024-12-12T01:20:14,032 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=ec3c0892b19bfc98e76c3f97d3d9e8e2, UNASSIGN 2024-12-12T01:20:14,032 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6897d1cee65e3089019400addb12c5a2, UNASSIGN 2024-12-12T01:20:14,033 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=ec3c0892b19bfc98e76c3f97d3d9e8e2, regionState=CLOSING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:20:14,033 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=6897d1cee65e3089019400addb12c5a2, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:20:14,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6897d1cee65e3089019400addb12c5a2, UNASSIGN because future has completed 2024-12-12T01:20:14,035 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:20:14,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6897d1cee65e3089019400addb12c5a2, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:20:14,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=ec3c0892b19bfc98e76c3f97d3d9e8e2, UNASSIGN because future has completed 2024-12-12T01:20:14,036 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:20:14,036 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:20:14,134 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-12T01:20:14,188 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:14,188 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:20:14,188 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 6897d1cee65e3089019400addb12c5a2, disabling compactions & flushes 2024-12-12T01:20:14,188 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 2024-12-12T01:20:14,188 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 2024-12-12T01:20:14,188 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. after waiting 0 ms 2024-12-12T01:20:14,188 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 2024-12-12T01:20:14,189 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:14,189 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:20:14,189 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing ec3c0892b19bfc98e76c3f97d3d9e8e2, disabling compactions & flushes 2024-12-12T01:20:14,189 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 2024-12-12T01:20:14,189 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 2024-12-12T01:20:14,189 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. after waiting 0 ms 2024-12-12T01:20:14,189 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 
2024-12-12T01:20:14,197 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:20:14,197 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:20:14,198 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:20:14,198 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:20:14,198 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2. 2024-12-12T01:20:14,198 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2. 2024-12-12T01:20:14,198 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for ec3c0892b19bfc98e76c3f97d3d9e8e2: Waiting for close lock at 1733966414189Running coprocessor pre-close hooks at 1733966414189Disabling compacts and flushes for region at 1733966414189Disabling writes for close at 1733966414189Writing region close event to WAL at 1733966414190 (+1 ms)Running coprocessor post-close hooks at 1733966414198 (+8 ms)Closed at 1733966414198 2024-12-12T01:20:14,198 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 6897d1cee65e3089019400addb12c5a2: Waiting for close lock at 1733966414188Running coprocessor pre-close hooks at 1733966414188Disabling compacts and flushes for region at 1733966414188Disabling writes for close at 1733966414188Writing region close event to WAL at 1733966414189 (+1 ms)Running coprocessor post-close hooks at 1733966414197 (+8 ms)Closed at 1733966414198 (+1 ms) 2024-12-12T01:20:14,199 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:14,201 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=6897d1cee65e3089019400addb12c5a2, regionState=CLOSED 2024-12-12T01:20:14,201 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:14,202 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=ec3c0892b19bfc98e76c3f97d3d9e8e2, regionState=CLOSED 2024-12-12T01:20:14,203 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6897d1cee65e3089019400addb12c5a2, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:20:14,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:20:14,207 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=208 2024-12-12T01:20:14,207 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure 6897d1cee65e3089019400addb12c5a2, server=36236b0d7605,38295,1733966101221 in 170 msec 2024-12-12T01:20:14,207 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=209 2024-12-12T01:20:14,207 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure ec3c0892b19bfc98e76c3f97d3d9e8e2, server=36236b0d7605,33883,1733966101479 in 169 msec 2024-12-12T01:20:14,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6897d1cee65e3089019400addb12c5a2, UNASSIGN in 176 msec 2024-12-12T01:20:14,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=207 2024-12-12T01:20:14,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=ec3c0892b19bfc98e76c3f97d3d9e8e2, UNASSIGN in 176 msec 2024-12-12T01:20:14,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-12-12T01:20:14,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 180 msec 2024-12-12T01:20:14,213 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966414213"}]},"ts":"1733966414213"} 2024-12-12T01:20:14,214 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-12T01:20:14,214 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-12T01:20:14,216 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 191 msec 2024-12-12T01:20:14,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-12T01:20:14,345 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-12T01:20:14,346 
INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,348 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,349 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,351 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,353 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:14,353 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:14,355 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/recovered.edits] 2024-12-12T01:20:14,355 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/recovered.edits] 2024-12-12T01:20:14,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-12T01:20:14,356 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-12T01:20:14,357 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-12T01:20:14,357 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-12T01:20:14,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:14,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:14,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:14,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-12T01:20:14,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:14,362 DEBUG [HFileArchiver-14 {}] 
backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/cf/ba50bb0d3d164cd38543c4ca83e221c7 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/cf/ba50bb0d3d164cd38543c4ca83e221c7 2024-12-12T01:20:14,362 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/cf/ffb99fcb80db43f9a982bcd5113ea4d9 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/cf/ffb99fcb80db43f9a982bcd5113ea4d9 2024-12-12T01:20:14,365 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2/recovered.edits/9.seqid 2024-12-12T01:20:14,366 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2/recovered.edits/9.seqid 2024-12-12T01:20:14,366 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/6897d1cee65e3089019400addb12c5a2 2024-12-12T01:20:14,366 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testEmptyExportFileSystemState/ec3c0892b19bfc98e76c3f97d3d9e8e2 2024-12-12T01:20:14,366 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-12T01:20:14,366 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:14,367 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:14,367 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:14,367 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:14,370 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,372 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-12T01:20:14,375 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-12T01:20:14,376 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,376 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-12T01:20:14,376 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966414376"}]},"ts":"9223372036854775807"} 2024-12-12T01:20:14,376 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966414376"}]},"ts":"9223372036854775807"} 2024-12-12T01:20:14,378 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-12T01:20:14,378 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 6897d1cee65e3089019400addb12c5a2, NAME => 'testtb-testEmptyExportFileSystemState,,1733966401238.6897d1cee65e3089019400addb12c5a2.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ec3c0892b19bfc98e76c3f97d3d9e8e2, NAME => 'testtb-testEmptyExportFileSystemState,1,1733966401238.ec3c0892b19bfc98e76c3f97d3d9e8e2.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T01:20:14,378 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
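The DisableTableProcedure and DeleteTableProcedure entries above are the master-side execution of a client-issued disable-then-delete request (visible earlier as "Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState"). As a rough illustration of the client side, which is not part of this log, the equivalent calls against the standard HBase Admin API would look like the minimal sketch below; the table name is taken from the log, everything else (connection setup, class name, error handling) is an assumption, not the actual test harness code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropExportTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Table name taken from the log; this client code is illustrative only.
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        // A table must be disabled before it can be deleted; disabling triggers
        // the DisableTableProcedure / CloseRegionProcedure chain seen in the log.
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);
        }
        // Deleting drives the DeleteTableProcedure: region directories are archived,
        // the regions and table state are removed from hbase:meta, ACLs are dropped.
        admin.deleteTable(table);
      }
    }
  }
}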
2024-12-12T01:20:14,378 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966414378"}]},"ts":"9223372036854775807"} 2024-12-12T01:20:14,380 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-12T01:20:14,380 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,381 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 34 msec 2024-12-12T01:20:14,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-12T01:20:14,465 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-12T01:20:14,465 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-12T01:20:14,471 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-12T01:20:14,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:14,474 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-12T01:20:14,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-12T01:20:14,518 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=809 (was 798) Potentially hanging thread: process reaper (pid 8940) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:36795 from appattempt_1733966109028_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:56822 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:32935 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:53004 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:34206 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6800 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1669304993_1 at /127.0.0.1:49980 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32935 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=790 (was 767) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=590 (was 600), ProcessCount=17 (was 14) - ProcessCount LEAK? -, AvailableMemoryMB=5706 (was 6183) 2024-12-12T01:20:14,518 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-12-12T01:20:14,538 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=809, OpenFileDescriptor=790, MaxFileDescriptor=1048576, SystemLoadAverage=590, ProcessCount=17, AvailableMemoryMB=5703 2024-12-12T01:20:14,538 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-12-12T01:20:14,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:20:14,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-12T01:20:14,542 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:20:14,542 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:20:14,542 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-12-12T01:20:14,543 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:20:14,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-12T01:20:14,549 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742279_1455 (size=404) 2024-12-12T01:20:14,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742279_1455 (size=404) 2024-12-12T01:20:14,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742279_1455 (size=404) 2024-12-12T01:20:14,551 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ccca775fd0cd0e7c92be4d830616669d, NAME => 'testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:20:14,551 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => db1338a217366846c95e41df7545534c, NAME => 'testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:20:14,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742280_1456 (size=65) 2024-12-12T01:20:14,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742280_1456 (size=65) 2024-12-12T01:20:14,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742280_1456 (size=65) 2024-12-12T01:20:14,563 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:20:14,563 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing db1338a217366846c95e41df7545534c, disabling compactions & flushes 2024-12-12T01:20:14,563 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 
2024-12-12T01:20:14,563 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 2024-12-12T01:20:14,563 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. after waiting 0 ms 2024-12-12T01:20:14,563 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 2024-12-12T01:20:14,563 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 2024-12-12T01:20:14,564 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for db1338a217366846c95e41df7545534c: Waiting for close lock at 1733966414563Disabling compacts and flushes for region at 1733966414563Disabling writes for close at 1733966414563Writing region close event to WAL at 1733966414563Closed at 1733966414563 2024-12-12T01:20:14,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742281_1457 (size=65) 2024-12-12T01:20:14,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742281_1457 (size=65) 2024-12-12T01:20:14,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742281_1457 (size=65) 2024-12-12T01:20:14,566 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:20:14,566 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing ccca775fd0cd0e7c92be4d830616669d, disabling compactions & flushes 2024-12-12T01:20:14,566 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:20:14,566 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:20:14,566 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. after waiting 0 ms 2024-12-12T01:20:14,566 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:20:14,566 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 
2024-12-12T01:20:14,566 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for ccca775fd0cd0e7c92be4d830616669d: Waiting for close lock at 1733966414566Disabling compacts and flushes for region at 1733966414566Disabling writes for close at 1733966414566Writing region close event to WAL at 1733966414566Closed at 1733966414566 2024-12-12T01:20:14,567 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:20:14,567 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733966414567"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966414567"}]},"ts":"1733966414567"} 2024-12-12T01:20:14,567 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733966414567"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966414567"}]},"ts":"1733966414567"} 2024-12-12T01:20:14,569 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-12T01:20:14,570 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:20:14,570 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966414570"}]},"ts":"1733966414570"} 2024-12-12T01:20:14,572 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-12T01:20:14,572 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:20:14,573 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:20:14,573 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:20:14,573 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:20:14,573 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:20:14,573 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:20:14,573 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:20:14,573 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:20:14,573 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:20:14,573 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:20:14,573 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:20:14,574 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ccca775fd0cd0e7c92be4d830616669d, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=db1338a217366846c95e41df7545534c, ASSIGN}] 2024-12-12T01:20:14,575 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=db1338a217366846c95e41df7545534c, ASSIGN 2024-12-12T01:20:14,575 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ccca775fd0cd0e7c92be4d830616669d, ASSIGN 2024-12-12T01:20:14,576 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ccca775fd0cd0e7c92be4d830616669d, ASSIGN; state=OFFLINE, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:20:14,576 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=db1338a217366846c95e41df7545534c, ASSIGN; state=OFFLINE, location=36236b0d7605,33883,1733966101479; forceNewPlan=false, retain=false 2024-12-12T01:20:14,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-12T01:20:14,726 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
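The "create 'testtb-testExportWithChecksum', ..." request and the CreateTableProcedure / RegionOpenAndInit entries above correspond to creating a table with a single 'cf' family, pre-split at row key '1' (hence the two regions with STARTKEY ''..'1' and '1'..''). A minimal client-side sketch using the standard Admin API follows; the table name, family name, and split point come from the log, while the surrounding code is an assumed illustration rather than the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    // One column family 'cf' keeping a single version, as in the logged descriptor.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)
            .build())
        .build();
    // Pre-split at '1' so the table starts with two regions: ['', '1') and ['1', '').
    byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master's CreateTableProcedure (pid=213 in this log) completes.
      admin.createTable(desc, splitKeys);
    }
  }
}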
2024-12-12T01:20:14,726 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=ccca775fd0cd0e7c92be4d830616669d, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:20:14,726 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=db1338a217366846c95e41df7545534c, regionState=OPENING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:20:14,728 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ccca775fd0cd0e7c92be4d830616669d, ASSIGN because future has completed 2024-12-12T01:20:14,729 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure ccca775fd0cd0e7c92be4d830616669d, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:20:14,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=db1338a217366846c95e41df7545534c, ASSIGN because future has completed 2024-12-12T01:20:14,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure db1338a217366846c95e41df7545534c, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:20:14,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-12T01:20:14,883 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:20:14,884 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => ccca775fd0cd0e7c92be4d830616669d, NAME => 'testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T01:20:14,884 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. service=AccessControlService 2024-12-12T01:20:14,884 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 2024-12-12T01:20:14,884 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => db1338a217366846c95e41df7545534c, NAME => 'testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T01:20:14,885 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:20:14,885 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. service=AccessControlService 2024-12-12T01:20:14,885 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:20:14,885 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:14,885 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum db1338a217366846c95e41df7545534c 2024-12-12T01:20:14,885 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:20:14,885 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:20:14,885 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:14,885 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for db1338a217366846c95e41df7545534c 2024-12-12T01:20:14,885 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for db1338a217366846c95e41df7545534c 2024-12-12T01:20:14,885 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:14,886 INFO [StoreOpener-db1338a217366846c95e41df7545534c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region db1338a217366846c95e41df7545534c 2024-12-12T01:20:14,887 INFO [StoreOpener-ccca775fd0cd0e7c92be4d830616669d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:14,888 INFO [StoreOpener-db1338a217366846c95e41df7545534c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); 
files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region db1338a217366846c95e41df7545534c columnFamilyName cf 2024-12-12T01:20:14,888 DEBUG [StoreOpener-db1338a217366846c95e41df7545534c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:20:14,889 INFO [StoreOpener-db1338a217366846c95e41df7545534c-1 {}] regionserver.HStore(327): Store=db1338a217366846c95e41df7545534c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:20:14,889 INFO [StoreOpener-ccca775fd0cd0e7c92be4d830616669d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ccca775fd0cd0e7c92be4d830616669d columnFamilyName cf 2024-12-12T01:20:14,889 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for db1338a217366846c95e41df7545534c 2024-12-12T01:20:14,889 DEBUG [StoreOpener-ccca775fd0cd0e7c92be4d830616669d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:20:14,889 INFO [StoreOpener-ccca775fd0cd0e7c92be4d830616669d-1 {}] regionserver.HStore(327): Store=ccca775fd0cd0e7c92be4d830616669d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:20:14,890 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:14,890 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c 2024-12-12T01:20:14,890 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:14,891 DEBUG 
[RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:14,891 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c 2024-12-12T01:20:14,891 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:14,891 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:14,891 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for db1338a217366846c95e41df7545534c 2024-12-12T01:20:14,891 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for db1338a217366846c95e41df7545534c 2024-12-12T01:20:14,893 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for db1338a217366846c95e41df7545534c 2024-12-12T01:20:14,893 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:14,895 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:20:14,895 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:20:14,895 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened db1338a217366846c95e41df7545534c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59995748, jitterRate=-0.10599368810653687}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:20:14,895 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for db1338a217366846c95e41df7545534c 2024-12-12T01:20:14,895 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened ccca775fd0cd0e7c92be4d830616669d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73155039, jitterRate=0.09009502828121185}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:20:14,895 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:14,896 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for db1338a217366846c95e41df7545534c: Running coprocessor pre-open hook at 1733966414885Writing region info on filesystem at 1733966414885Initializing all the Stores at 1733966414886 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966414886Cleaning up temporary data from old regions at 1733966414891 (+5 ms)Running coprocessor post-open hooks at 1733966414895 (+4 ms)Region opened successfully at 1733966414896 (+1 ms) 2024-12-12T01:20:14,896 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for ccca775fd0cd0e7c92be4d830616669d: Running coprocessor pre-open hook at 1733966414885Writing region info on filesystem at 1733966414885Initializing all the Stores at 1733966414886 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966414886Cleaning up temporary data from old regions at 1733966414891 (+5 ms)Running coprocessor post-open hooks at 1733966414895 (+4 ms)Region opened successfully at 1733966414896 (+1 ms) 2024-12-12T01:20:14,896 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d., pid=216, masterSystemTime=1733966414880 2024-12-12T01:20:14,896 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c., pid=217, masterSystemTime=1733966414881 2024-12-12T01:20:14,898 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:20:14,899 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 
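Once the OpenRegionProcedure handlers above finish, the master records regionState=OPEN with openSeqNum=2 in hbase:meta (next entries), and clients can resolve the new regions. A hedged sketch of inspecting that assignment from the client side with a RegionLocator; the code is illustrative only and not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowChecksumTableRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Each location pairs a region (encoded name, start/end keys) with the
      // region server it is open on, matching the OPEN rows written to hbase:meta.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " ["
            + loc.getRegion().getRegionNameAsString() + "] -> " + loc.getServerName());
      }
    }
  }
}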
2024-12-12T01:20:14,899 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=ccca775fd0cd0e7c92be4d830616669d, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:20:14,899 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 2024-12-12T01:20:14,899 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 2024-12-12T01:20:14,901 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=db1338a217366846c95e41df7545534c, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:20:14,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure ccca775fd0cd0e7c92be4d830616669d, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:20:14,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure db1338a217366846c95e41df7545534c, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:20:14,904 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=214 2024-12-12T01:20:14,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure ccca775fd0cd0e7c92be4d830616669d, server=36236b0d7605,38295,1733966101221 in 173 msec 2024-12-12T01:20:14,905 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ccca775fd0cd0e7c92be4d830616669d, ASSIGN in 330 msec 2024-12-12T01:20:14,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=215 2024-12-12T01:20:14,906 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure db1338a217366846c95e41df7545534c, server=36236b0d7605,33883,1733966101479 in 174 msec 2024-12-12T01:20:14,907 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=215, resume processing ppid=213 2024-12-12T01:20:14,907 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=db1338a217366846c95e41df7545534c, ASSIGN in 331 msec 2024-12-12T01:20:14,908 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:20:14,908 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966414908"}]},"ts":"1733966414908"} 2024-12-12T01:20:14,910 INFO 
[PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-12T01:20:14,911 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:20:14,911 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-12T01:20:14,915 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-12T01:20:14,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:14,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:14,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:14,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:20:14,920 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:14,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T01:20:14,923 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:14,923 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:14,924 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:14,924 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:14,924 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:14,924 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:14,924 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-12T01:20:14,928 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 382 msec 2024-12-12T01:20:15,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-12T01:20:15,165 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-12T01:20:15,165 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-12-12T01:20:15,165 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:20:15,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44743 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32829 bytes) of info 2024-12-12T01:20:15,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-12T01:20:15,174 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:20:15,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-12-12T01:20:15,174 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-12T01:20:15,178 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-12T01:20:15,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966415178 (current time:1733966415178). 
2024-12-12T01:20:15,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:20:15,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-12T01:20:15,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:20:15,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dcabd90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:15,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:20:15,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:20:15,181 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:20:15,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:20:15,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:20:15,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c830c35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:15,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:20:15,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:20:15,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:15,183 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59654, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:20:15,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1eaa57bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:15,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:20:15,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:20:15,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:20:15,186 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37298, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:20:15,188 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:20:15,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:20:15,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:15,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:15,188 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:20:15,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35a75858, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:15,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:20:15,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:20:15,191 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:20:15,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:20:15,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:20:15,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b73cfaf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:15,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:20:15,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:20:15,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:15,193 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59674, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:20:15,194 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62e8dd8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:15,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:20:15,197 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:20:15,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:20:15,198 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37306, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-12T01:20:15,201 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:20:15,203 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:20:15,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:20:15,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:15,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:15,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 
2024-12-12T01:20:15,204 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:20:15,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:20:15,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-12T01:20:15,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-12T01:20:15,208 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:20:15,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-12T01:20:15,209 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:20:15,212 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:20:15,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742282_1458 (size=161) 2024-12-12T01:20:15,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742282_1458 (size=161) 2024-12-12T01:20:15,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742282_1458 (size=161) 2024-12-12T01:20:15,224 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:20:15,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccca775fd0cd0e7c92be4d830616669d}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure db1338a217366846c95e41df7545534c}] 2024-12-12T01:20:15,225 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:15,225 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure db1338a217366846c95e41df7545534c 2024-12-12T01:20:15,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-12T01:20:15,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-12-12T01:20:15,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-12-12T01:20:15,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 2024-12-12T01:20:15,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:20:15,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for db1338a217366846c95e41df7545534c: 2024-12-12T01:20:15,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for ccca775fd0cd0e7c92be4d830616669d: 2024-12-12T01:20:15,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. for emptySnaptb0-testExportWithChecksum completed. 2024-12-12T01:20:15,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. for emptySnaptb0-testExportWithChecksum completed. 2024-12-12T01:20:15,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-12T01:20:15,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-12T01:20:15,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:20:15,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:20:15,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:20:15,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:20:15,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742283_1459 (size=68) 2024-12-12T01:20:15,388 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:20:15,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-12-12T01:20:15,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742283_1459 (size=68) 2024-12-12T01:20:15,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742283_1459 (size=68) 2024-12-12T01:20:15,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-12-12T01:20:15,389 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:15,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742284_1460 (size=68) 2024-12-12T01:20:15,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742284_1460 (size=68) 2024-12-12T01:20:15,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742284_1460 (size=68) 2024-12-12T01:20:15,396 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 
2024-12-12T01:20:15,396 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-12-12T01:20:15,396 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:15,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-12-12T01:20:15,397 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region db1338a217366846c95e41df7545534c 2024-12-12T01:20:15,397 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure db1338a217366846c95e41df7545534c 2024-12-12T01:20:15,399 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ccca775fd0cd0e7c92be4d830616669d in 173 msec 2024-12-12T01:20:15,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=220, resume processing ppid=218 2024-12-12T01:20:15,400 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:20:15,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure db1338a217366846c95e41df7545534c in 174 msec 2024-12-12T01:20:15,400 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:20:15,401 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:20:15,401 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-12T01:20:15,402 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-12T01:20:15,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742285_1461 (size=543) 2024-12-12T01:20:15,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742285_1461 (size=543) 2024-12-12T01:20:15,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to 
blk_1073742285_1461 (size=543) 2024-12-12T01:20:15,422 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:20:15,427 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:20:15,427 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-12T01:20:15,429 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:20:15,429 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-12T01:20:15,430 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 223 msec 2024-12-12T01:20:15,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-12T01:20:15,525 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-12T01:20:15,533 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='0646598a7537d2ce06cb63080ea8879b0', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:20:15,534 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='181a834cd6a1f7742931e3e6d156fb5af', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:20:15,538 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='217fea6a1c6c3fbc27abd462f6f5db038', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:20:15,539 
DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='3bfb0d7888fb8edf915d8c5e77dedd528', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:20:15,540 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='4fef16522a7c9c7c9257bbf80ea162daf', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:20:15,545 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:20:15,545 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33883 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:20:15,546 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-12T01:20:15,549 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-12T01:20:15,549 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:20:15,549 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:20:15,551 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-12T01:20:15,558 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-12T01:20:15,565 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-12T01:20:15,569 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-12T01:20:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966415569 (current time:1733966415569). 
2024-12-12T01:20:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:20:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-12T01:20:15,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:20:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e5d3cab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:20:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:20:15,571 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:20:15,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:20:15,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:20:15,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@107f8eaa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:15,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:20:15,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:20:15,572 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:15,574 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59684, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:20:15,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77e662ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:15,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:20:15,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:20:15,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:20:15,579 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37314, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:20:15,580 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:20:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:20:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:15,581 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:20:15,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15201e2b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:15,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:20:15,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:20:15,585 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:20:15,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:20:15,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:20:15,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154a02ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:15,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:20:15,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:20:15,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:15,587 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59704, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:20:15,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@734d300a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:20:15,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:20:15,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:20:15,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:20:15,590 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37328, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-12T01:20:15,592 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:20:15,594 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:20:15,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:20:15,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:15,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:20:15,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 
2024-12-12T01:20:15,595 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:20:15,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:20:15,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-12T01:20:15,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-12T01:20:15,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-12T01:20:15,599 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:20:15,600 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:20:15,602 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:20:15,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742286_1462 (size=156) 2024-12-12T01:20:15,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742286_1462 (size=156) 2024-12-12T01:20:15,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742286_1462 (size=156) 2024-12-12T01:20:15,624 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:20:15,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccca775fd0cd0e7c92be4d830616669d}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure db1338a217366846c95e41df7545534c}] 2024-12-12T01:20:15,625 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:15,625 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure db1338a217366846c95e41df7545534c 2024-12-12T01:20:15,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-12T01:20:15,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-12-12T01:20:15,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-12-12T01:20:15,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 2024-12-12T01:20:15,778 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:20:15,778 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing ccca775fd0cd0e7c92be4d830616669d 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-12T01:20:15,778 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing db1338a217366846c95e41df7545534c 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-12T01:20:15,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/.tmp/cf/73c03e8cb1b24a43aec401517fc362e2 is 71, key is 0527fe50fb7e7328acf8730b3d54de69/cf:q/1733966415544/Put/seqid=0 2024-12-12T01:20:15,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/.tmp/cf/2827a2cc09464cc4913d7007c6acafcb is 71, key is 114f2d5ff7027de0faeb7139a46c045c/cf:q/1733966415545/Put/seqid=0 2024-12-12T01:20:15,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742287_1463 (size=5286) 2024-12-12T01:20:15,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742287_1463 (size=5286) 2024-12-12T01:20:15,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742287_1463 (size=5286) 2024-12-12T01:20:15,810 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/.tmp/cf/73c03e8cb1b24a43aec401517fc362e2 2024-12-12T01:20:15,816 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/.tmp/cf/73c03e8cb1b24a43aec401517fc362e2 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf/73c03e8cb1b24a43aec401517fc362e2 2024-12-12T01:20:15,822 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf/73c03e8cb1b24a43aec401517fc362e2, entries=3, sequenceid=6, filesize=5.2 K 2024-12-12T01:20:15,823 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for ccca775fd0cd0e7c92be4d830616669d in 45ms, sequenceid=6, compaction requested=false 2024-12-12T01:20:15,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-12T01:20:15,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for ccca775fd0cd0e7c92be4d830616669d: 2024-12-12T01:20:15,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. for snaptb0-testExportWithChecksum completed. 2024-12-12T01:20:15,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-12T01:20:15,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:20:15,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf/73c03e8cb1b24a43aec401517fc362e2] hfiles 2024-12-12T01:20:15,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf/73c03e8cb1b24a43aec401517fc362e2 for snapshot=snaptb0-testExportWithChecksum 2024-12-12T01:20:15,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742288_1464 (size=8324) 2024-12-12T01:20:15,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742288_1464 (size=8324) 2024-12-12T01:20:15,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742288_1464 (size=8324) 2024-12-12T01:20:15,831 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/.tmp/cf/2827a2cc09464cc4913d7007c6acafcb 2024-12-12T01:20:15,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742289_1465 (size=107) 2024-12-12T01:20:15,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742289_1465 (size=107) 2024-12-12T01:20:15,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742289_1465 (size=107) 2024-12-12T01:20:15,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 
2024-12-12T01:20:15,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-12-12T01:20:15,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/.tmp/cf/2827a2cc09464cc4913d7007c6acafcb as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf/2827a2cc09464cc4913d7007c6acafcb 2024-12-12T01:20:15,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-12-12T01:20:15,838 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:15,838 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:20:15,841 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ccca775fd0cd0e7c92be4d830616669d in 215 msec 2024-12-12T01:20:15,843 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf/2827a2cc09464cc4913d7007c6acafcb, entries=47, sequenceid=6, filesize=8.1 K 2024-12-12T01:20:15,844 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for db1338a217366846c95e41df7545534c in 66ms, sequenceid=6, compaction requested=false 2024-12-12T01:20:15,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for db1338a217366846c95e41df7545534c: 2024-12-12T01:20:15,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. for snaptb0-testExportWithChecksum completed. 2024-12-12T01:20:15,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-12T01:20:15,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:20:15,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf/2827a2cc09464cc4913d7007c6acafcb] hfiles 2024-12-12T01:20:15,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf/2827a2cc09464cc4913d7007c6acafcb for snapshot=snaptb0-testExportWithChecksum 2024-12-12T01:20:15,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742290_1466 (size=107) 2024-12-12T01:20:15,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742290_1466 (size=107) 2024-12-12T01:20:15,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742290_1466 (size=107) 2024-12-12T01:20:15,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 
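The repeated "Checking to see if procedure is done pid=221" lines are the master answering a client that is polling for completion of the snapshot procedure while the region subprocedures run. A rough sketch of that pattern with the public API follows, assuming an Admin handle is already open; the real client uses its own retry and backoff logic rather than the fixed sleep shown here.

    import java.util.concurrent.Future;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotPolling {
      // Submit the snapshot without blocking, then poll the master until the
      // SnapshotProcedure reports completion. The server side of this polling is
      // the "Checking to see if procedure is done" lines in the log.
      static void snapshotAndWait(Admin admin) throws Exception {
        SnapshotDescription desc = new SnapshotDescription(
            "snaptb0-testExportWithChecksum",
            TableName.valueOf("testtb-testExportWithChecksum"),
            SnapshotType.FLUSH);
        Future<Void> pending = admin.snapshotAsync(desc);
        while (!admin.isSnapshotFinished(desc)) {
          Thread.sleep(500); // assumed poll interval; the real client backs off
        }
        pending.get(); // surfaces any failure recorded by the procedure
      }
    }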
2024-12-12T01:20:15,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-12-12T01:20:15,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-12-12T01:20:15,856 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region db1338a217366846c95e41df7545534c 2024-12-12T01:20:15,856 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure db1338a217366846c95e41df7545534c 2024-12-12T01:20:15,859 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=223, resume processing ppid=221 2024-12-12T01:20:15,859 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:20:15,859 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure db1338a217366846c95e41df7545534c in 233 msec 2024-12-12T01:20:15,859 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:20:15,860 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:20:15,860 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-12T01:20:15,861 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-12T01:20:15,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742291_1467 (size=621) 2024-12-12T01:20:15,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742291_1467 (size=621) 2024-12-12T01:20:15,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742291_1467 (size=621) 2024-12-12T01:20:15,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-12T01:20:16,215 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:20:16,225 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-12T01:20:16,289 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:20:16,295 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:20:16,295 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-12T01:20:16,296 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:20:16,297 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-12T01:20:16,298 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 701 msec 2024-12-12T01:20:16,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-12T01:20:16,735 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-12T01:20:16,735 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966416735 2024-12-12T01:20:16,736 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966416735, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966416735, srcFsUri=hdfs://localhost:33353, srcDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:20:16,780 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33353, 
inputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:20:16,780 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@24f86f6a, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966416735, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966416735/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-12T01:20:16,782 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-12T01:20:16,786 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966416735/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-12T01:20:16,814 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:16,814 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:16,815 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:18,020 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-10451726894573884884.jar 2024-12-12T01:20:18,020 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:18,021 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:18,088 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-17014206109429824905.jar 2024-12-12T01:20:18,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:18,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:18,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:18,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:18,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:18,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:20:18,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T01:20:18,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T01:20:18,091 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T01:20:18,091 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T01:20:18,091 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T01:20:18,091 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T01:20:18,092 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T01:20:18,092 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T01:20:18,092 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T01:20:18,092 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T01:20:18,092 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T01:20:18,093 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:20:18,093 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:20:18,093 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:20:18,093 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:20:18,094 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:20:18,094 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:20:18,094 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
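The export itself is the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool, which builds a MapReduce job (hence the dependency-jar resolution by TableMapReduceUtil above) and copies the snapshot manifest plus the referenced hfiles to the target filesystem. A hedged sketch of driving the tool programmatically follows; the destination path and mapper count are illustrative assumptions, since the test exports into its own per-run temporary directory.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExport {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // "-copy-to" and "-mappers" values are placeholders; the source cluster
        // is taken from the configuration on the classpath.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export",
            "-mappers", "2"
        });
        System.exit(rc);
      }
    }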
2024-12-12T01:20:18,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742292_1468 (size=131440) 2024-12-12T01:20:18,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742292_1468 (size=131440) 2024-12-12T01:20:18,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742292_1468 (size=131440) 2024-12-12T01:20:18,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742293_1469 (size=443172) 2024-12-12T01:20:18,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742293_1469 (size=443172) 2024-12-12T01:20:18,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742293_1469 (size=443172) 2024-12-12T01:20:18,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742294_1470 (size=4188619) 2024-12-12T01:20:18,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742294_1470 (size=4188619) 2024-12-12T01:20:18,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742294_1470 (size=4188619) 2024-12-12T01:20:18,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742295_1471 (size=1323991) 2024-12-12T01:20:18,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742295_1471 (size=1323991) 2024-12-12T01:20:18,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742295_1471 (size=1323991) 2024-12-12T01:20:18,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742296_1472 (size=903541) 2024-12-12T01:20:18,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742296_1472 (size=903541) 2024-12-12T01:20:18,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742296_1472 (size=903541) 2024-12-12T01:20:18,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742297_1473 (size=8360360) 2024-12-12T01:20:18,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742297_1473 (size=8360360) 2024-12-12T01:20:18,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742297_1473 (size=8360360) 2024-12-12T01:20:18,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742298_1474 (size=1877034) 2024-12-12T01:20:18,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to 
blk_1073742298_1474 (size=1877034) 2024-12-12T01:20:18,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742298_1474 (size=1877034) 2024-12-12T01:20:18,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742299_1475 (size=77835) 2024-12-12T01:20:18,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742299_1475 (size=77835) 2024-12-12T01:20:18,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742299_1475 (size=77835) 2024-12-12T01:20:18,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742300_1476 (size=30949) 2024-12-12T01:20:18,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742300_1476 (size=30949) 2024-12-12T01:20:18,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742300_1476 (size=30949) 2024-12-12T01:20:18,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742301_1477 (size=1597213) 2024-12-12T01:20:18,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742301_1477 (size=1597213) 2024-12-12T01:20:18,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742301_1477 (size=1597213) 2024-12-12T01:20:18,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742302_1478 (size=4695811) 2024-12-12T01:20:18,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742302_1478 (size=4695811) 2024-12-12T01:20:18,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742302_1478 (size=4695811) 2024-12-12T01:20:18,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742303_1479 (size=232957) 2024-12-12T01:20:18,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742303_1479 (size=232957) 2024-12-12T01:20:18,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742303_1479 (size=232957) 2024-12-12T01:20:18,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742304_1480 (size=127628) 2024-12-12T01:20:18,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742304_1480 (size=127628) 2024-12-12T01:20:18,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742304_1480 (size=127628) 2024-12-12T01:20:18,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is 
added to blk_1073742305_1481 (size=20406) 2024-12-12T01:20:18,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742305_1481 (size=20406) 2024-12-12T01:20:18,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742305_1481 (size=20406) 2024-12-12T01:20:18,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742306_1482 (size=5175431) 2024-12-12T01:20:18,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742306_1482 (size=5175431) 2024-12-12T01:20:18,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742306_1482 (size=5175431) 2024-12-12T01:20:18,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742307_1483 (size=217634) 2024-12-12T01:20:18,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742307_1483 (size=217634) 2024-12-12T01:20:18,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742307_1483 (size=217634) 2024-12-12T01:20:18,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742308_1484 (size=1832290) 2024-12-12T01:20:18,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742308_1484 (size=1832290) 2024-12-12T01:20:18,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742308_1484 (size=1832290) 2024-12-12T01:20:18,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742309_1485 (size=322274) 2024-12-12T01:20:18,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742309_1485 (size=322274) 2024-12-12T01:20:18,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742309_1485 (size=322274) 2024-12-12T01:20:18,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742310_1486 (size=503880) 2024-12-12T01:20:18,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742310_1486 (size=503880) 2024-12-12T01:20:18,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742310_1486 (size=503880) 2024-12-12T01:20:18,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742311_1487 (size=29229) 2024-12-12T01:20:18,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742311_1487 (size=29229) 2024-12-12T01:20:18,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34547 is added to blk_1073742311_1487 (size=29229) 2024-12-12T01:20:18,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742312_1488 (size=24096) 2024-12-12T01:20:18,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742312_1488 (size=24096) 2024-12-12T01:20:18,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742312_1488 (size=24096) 2024-12-12T01:20:18,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742313_1489 (size=111872) 2024-12-12T01:20:18,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742313_1489 (size=111872) 2024-12-12T01:20:18,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742313_1489 (size=111872) 2024-12-12T01:20:18,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742314_1490 (size=6426224) 2024-12-12T01:20:18,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742314_1490 (size=6426224) 2024-12-12T01:20:18,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742314_1490 (size=6426224) 2024-12-12T01:20:18,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742315_1491 (size=45609) 2024-12-12T01:20:18,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742315_1491 (size=45609) 2024-12-12T01:20:18,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742315_1491 (size=45609) 2024-12-12T01:20:18,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742316_1492 (size=136454) 2024-12-12T01:20:18,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742316_1492 (size=136454) 2024-12-12T01:20:18,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742316_1492 (size=136454) 2024-12-12T01:20:18,774 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
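The "No job jar file set" warning appears because the test submits the job from classes that are already on the mini cluster's classpath. In a standalone MapReduce driver it is usually silenced by pointing the job at the jar containing the job's classes, as the warning's reference to Job#setJar(String) suggests; a small illustrative sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarSetup {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "snapshot-export-example");
        // Ship the jar that contains the job's classes with the submission;
        // without this (or Job#setJar(String)) the warning above is printed.
        job.setJarByClass(JobJarSetup.class);
      }
    }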
2024-12-12T01:20:18,777 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-12T01:20:18,786 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-12T01:20:18,786 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-12T01:20:18,822 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0008_000001 (auth:SIMPLE) from 127.0.0.1:41544 2024-12-12T01:20:18,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742317_1493 (size=441) 2024-12-12T01:20:18,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742317_1493 (size=441) 2024-12-12T01:20:18,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742317_1493 (size=441) 2024-12-12T01:20:18,854 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0008/container_1733966109028_0008_01_000001/launch_container.sh] 2024-12-12T01:20:18,854 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0008/container_1733966109028_0008_01_000001/container_tokens] 2024-12-12T01:20:18,854 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0008/container_1733966109028_0008_01_000001/sysfs] 2024-12-12T01:20:18,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742318_1494 (size=21) 2024-12-12T01:20:18,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742318_1494 (size=21) 2024-12-12T01:20:18,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742318_1494 (size=21) 2024-12-12T01:20:18,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742319_1495 (size=304048) 2024-12-12T01:20:18,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742319_1495 (size=304048) 2024-12-12T01:20:18,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742319_1495 (size=304048) 2024-12-12T01:20:19,321 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single 
application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:20:19,321 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:20:19,361 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:58524 2024-12-12T01:20:20,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-12T01:20:20,711 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-12T01:20:20,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-12T01:20:26,214 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:20:28,492 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:44158 2024-12-12T01:20:28,912 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-12T01:20:29,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742320_1496 (size=349746) 2024-12-12T01:20:29,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742320_1496 (size=349746) 2024-12-12T01:20:29,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742320_1496 (size=349746) 2024-12-12T01:20:30,769 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:33892 2024-12-12T01:20:30,769 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:40582 2024-12-12T01:20:34,503 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d644bbcb843c6264137c6748d170fd67, had cached 0 bytes from a total of 8324 2024-12-12T01:20:34,503 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e4e1b654a5a995b6e71de7ab606443fe, had cached 0 bytes from a total of 5288 2024-12-12T01:20:38,056 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_3/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000003/launch_container.sh] 2024-12-12T01:20:38,056 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_3/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000003/container_tokens] 2024-12-12T01:20:38,056 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_3/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf/73c03e8cb1b24a43aec401517fc362e2 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966416735/archive/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf/73c03e8cb1b24a43aec401517fc362e2. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. 
(NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-12T01:20:39,669 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:43044 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf/2827a2cc09464cc4913d7007c6acafcb and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966416735/archive/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf/2827a2cc09464cc4913d7007c6acafcb. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
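The failure comes from the checksum comparison in ExportSnapshot$ExportMapper.verifyCopyResult: the source is HDFS and the target is a local filesystem, so their default file checksums are not comparable, and the same error recurs below as the map tasks are retried. If such an export needed to succeed, the message itself names the two knobs; a hedged sketch of applying them when re-running the tool is shown here. The paths are illustrative, and whether COMPOSITE_CRC actually makes the checksums comparable depends on the target filesystem's checksum support.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RetryExportAcrossFilesystems {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Option 1, as suggested by the error text: request file-level CRCs that
        // can be compared across HDFS and other filesystems.
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export"
            // Option 2: add "-no-checksum-verify" to skip the check entirely,
            // accepting the risk of masking corruption during the copy.
        });
        System.exit(rc);
      }
    }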
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-12T01:20:41,691 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:43052 2024-12-12T01:20:45,932 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000002/launch_container.sh] 2024-12-12T01:20:45,932 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000002/container_tokens] 2024-12-12T01:20:45,932 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf/73c03e8cb1b24a43aec401517fc362e2 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966416735/archive/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf/73c03e8cb1b24a43aec401517fc362e2. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-12T01:20:47,775 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000004/launch_container.sh] 2024-12-12T01:20:47,775 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000004/container_tokens] 2024-12-12T01:20:47,775 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000004/sysfs] 2024-12-12T01:20:48,754 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:48120 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf/2827a2cc09464cc4913d7007c6acafcb and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966416735/archive/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf/2827a2cc09464cc4913d7007c6acafcb. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-12T01:20:51,723 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:48132 2024-12-12T01:20:55,843 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_1/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000005/launch_container.sh] 2024-12-12T01:20:55,843 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_1/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000005/container_tokens] 2024-12-12T01:20:55,843 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_1/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000005/sysfs] 2024-12-12T01:20:56,613 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_1/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000006/launch_container.sh] 2024-12-12T01:20:56,613 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_1/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000006/container_tokens] 2024-12-12T01:20:56,613 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_1/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000006/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf/73c03e8cb1b24a43aec401517fc362e2 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966416735/archive/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf/73c03e8cb1b24a43aec401517fc362e2. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-12T01:20:57,723 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:35262 2024-12-12T01:20:58,420 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000007/launch_container.sh] 2024-12-12T01:20:58,421 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000007/container_tokens] 2024-12-12T01:20:58,421 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000007/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf/2827a2cc09464cc4913d7007c6acafcb and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/local-export-1733966416735/archive/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf/2827a2cc09464cc4913d7007c6acafcb. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-12T01:20:58,913 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-12T01:20:59,731 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:35276 2024-12-12T01:20:59,885 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ccca775fd0cd0e7c92be4d830616669d, had cached 0 bytes from a total of 5286 2024-12-12T01:20:59,885 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region db1338a217366846c95e41df7545534c, had cached 0 bytes from a total of 8324 2024-12-12T01:21:01,159 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:35282 2024-12-12T01:21:01,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742321_1497 (size=30175) 2024-12-12T01:21:01,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742321_1497 (size=30175) 2024-12-12T01:21:01,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742321_1497 (size=30175) 2024-12-12T01:21:01,220 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733966109028_0009_01_000009 is : 143 2024-12-12T01:21:01,237 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000009/launch_container.sh] 2024-12-12T01:21:01,237 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000009/container_tokens] 2024-12-12T01:21:01,237 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000009/sysfs] 2024-12-12T01:21:01,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742322_1498 (size=460) 2024-12-12T01:21:01,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742322_1498 (size=460) 2024-12-12T01:21:01,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742322_1498 (size=460) 2024-12-12T01:21:01,261 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000008/launch_container.sh] 2024-12-12T01:21:01,261 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000008/container_tokens] 2024-12-12T01:21:01,261 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_2/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000008/sysfs] 2024-12-12T01:21:01,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742323_1499 (size=30175) 2024-12-12T01:21:01,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742323_1499 (size=30175) 2024-12-12T01:21:01,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742323_1499 (size=30175) 2024-12-12T01:21:01,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742324_1500 (size=349746) 2024-12-12T01:21:01,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742324_1500 (size=349746) 2024-12-12T01:21:01,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742324_1500 (size=349746) 2024-12-12T01:21:01,297 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:35294 2024-12-12T01:21:02,741 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733966109028_0009_m_000001 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?] 
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
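The repeated "Checksum mismatch" failures above name two remediations in the error text itself: file-level checksum validation via dfs.checksum.combine.mode=COMPOSITE_CRC (useful when the source and destination filesystems or block sizes differ, as with the hdfs:// source and file:/ destination here), or skipping verification entirely with -no-checksum-verify. Below is a minimal sketch, not taken from this run, of how either option could be passed to ExportSnapshot through ToolRunner, the same entry point visible in the stack traces; the class name and the local destination path are illustrative assumptions, while the snapshot name and option names come from the log.

    // Hedged sketch only: re-driving the export with the remediations the error message suggests.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ReExportWithCompositeCrc {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Option 1 (from the error text): file-level checksums that stay comparable
        // across filesystems with different block sizes.
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export"  // hypothetical destination, not from this run
            // Option 2 (also from the error text): add "-no-checksum-verify" instead of
            // setting COMPOSITE_CRC to skip verification entirely.
        });
        System.exit(rc);
      }
    }

COMPOSITE_CRC keeps verification in place while making the checksums comparable; -no-checksum-verify trades that safety away, as the NOTE in the message warns, since skipping checksums can mask corruption introduced during the copy.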
2024-12-12T01:21:02,742 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966462742 2024-12-12T01:21:02,742 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33353, tgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966462742, rawTgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966462742, srcFsUri=hdfs://localhost:33353, srcDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:21:02,771 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33353, inputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:21:02,771 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966462742, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966462742/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-12T01:21:02,773 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-12T01:21:02,777 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966462742/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-12T01:21:02,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742325_1501 (size=156) 2024-12-12T01:21:02,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742325_1501 (size=156) 2024-12-12T01:21:02,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742325_1501 (size=156) 2024-12-12T01:21:02,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742326_1502 (size=621) 2024-12-12T01:21:02,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742326_1502 (size=621) 2024-12-12T01:21:02,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742326_1502 (size=621) 2024-12-12T01:21:02,788 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:02,788 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:02,788 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:03,417 WARN [regionserver/36236b0d7605:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 2, running: 1 2024-12-12T01:21:03,794 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-10259817558022833095.jar 2024-12-12T01:21:03,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:03,795 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:03,862 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-9037758601344843525.jar 2024-12-12T01:21:03,862 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:03,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:03,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:03,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:03,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:03,863 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:03,864 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T01:21:03,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T01:21:03,864 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T01:21:03,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T01:21:03,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T01:21:03,865 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T01:21:03,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T01:21:03,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T01:21:03,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T01:21:03,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T01:21:03,866 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T01:21:03,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:21:03,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:21:03,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:21:03,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:21:03,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:21:03,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:21:03,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:21:03,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742327_1503 (size=131440) 2024-12-12T01:21:03,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742327_1503 (size=131440) 2024-12-12T01:21:03,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742327_1503 (size=131440) 2024-12-12T01:21:03,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742328_1504 (size=4188619) 2024-12-12T01:21:03,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742328_1504 (size=4188619) 2024-12-12T01:21:03,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742328_1504 (size=4188619) 2024-12-12T01:21:03,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742329_1505 (size=6426224) 2024-12-12T01:21:03,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742329_1505 (size=6426224) 2024-12-12T01:21:03,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added 
to blk_1073742329_1505 (size=6426224) 2024-12-12T01:21:03,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742330_1506 (size=1323991) 2024-12-12T01:21:03,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742330_1506 (size=1323991) 2024-12-12T01:21:03,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742330_1506 (size=1323991) 2024-12-12T01:21:03,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742331_1507 (size=903541) 2024-12-12T01:21:03,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742331_1507 (size=903541) 2024-12-12T01:21:04,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742331_1507 (size=903541) 2024-12-12T01:21:04,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742332_1508 (size=8360360) 2024-12-12T01:21:04,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742332_1508 (size=8360360) 2024-12-12T01:21:04,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742332_1508 (size=8360360) 2024-12-12T01:21:04,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742333_1509 (size=1877034) 2024-12-12T01:21:04,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742333_1509 (size=1877034) 2024-12-12T01:21:04,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742333_1509 (size=1877034) 2024-12-12T01:21:04,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742334_1510 (size=77835) 2024-12-12T01:21:04,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742334_1510 (size=77835) 2024-12-12T01:21:04,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742334_1510 (size=77835) 2024-12-12T01:21:04,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742335_1511 (size=30949) 2024-12-12T01:21:04,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742335_1511 (size=30949) 2024-12-12T01:21:04,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742335_1511 (size=30949) 2024-12-12T01:21:04,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742336_1512 (size=1597213) 2024-12-12T01:21:04,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39595 is added to blk_1073742336_1512 (size=1597213) 2024-12-12T01:21:04,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742336_1512 (size=1597213) 2024-12-12T01:21:04,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742337_1513 (size=4695811) 2024-12-12T01:21:04,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742337_1513 (size=4695811) 2024-12-12T01:21:04,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742337_1513 (size=4695811) 2024-12-12T01:21:04,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742338_1514 (size=232957) 2024-12-12T01:21:04,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742338_1514 (size=232957) 2024-12-12T01:21:04,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742338_1514 (size=232957) 2024-12-12T01:21:04,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742339_1515 (size=127628) 2024-12-12T01:21:04,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742339_1515 (size=127628) 2024-12-12T01:21:04,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742339_1515 (size=127628) 2024-12-12T01:21:04,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742340_1516 (size=20406) 2024-12-12T01:21:04,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742340_1516 (size=20406) 2024-12-12T01:21:04,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742340_1516 (size=20406) 2024-12-12T01:21:04,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742341_1517 (size=5175431) 2024-12-12T01:21:04,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742341_1517 (size=5175431) 2024-12-12T01:21:04,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742341_1517 (size=5175431) 2024-12-12T01:21:04,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742342_1518 (size=217634) 2024-12-12T01:21:04,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742342_1518 (size=217634) 2024-12-12T01:21:04,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742342_1518 (size=217634) 2024-12-12T01:21:04,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39595 is added to blk_1073742343_1519 (size=443172) 2024-12-12T01:21:04,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742343_1519 (size=443172) 2024-12-12T01:21:04,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742343_1519 (size=443172) 2024-12-12T01:21:04,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742344_1520 (size=1832290) 2024-12-12T01:21:04,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742344_1520 (size=1832290) 2024-12-12T01:21:04,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742344_1520 (size=1832290) 2024-12-12T01:21:04,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742345_1521 (size=322274) 2024-12-12T01:21:04,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742345_1521 (size=322274) 2024-12-12T01:21:04,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742345_1521 (size=322274) 2024-12-12T01:21:04,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742346_1522 (size=503880) 2024-12-12T01:21:04,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742346_1522 (size=503880) 2024-12-12T01:21:04,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742346_1522 (size=503880) 2024-12-12T01:21:04,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742347_1523 (size=29229) 2024-12-12T01:21:04,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742347_1523 (size=29229) 2024-12-12T01:21:04,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742347_1523 (size=29229) 2024-12-12T01:21:04,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742348_1524 (size=24096) 2024-12-12T01:21:04,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742348_1524 (size=24096) 2024-12-12T01:21:04,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742348_1524 (size=24096) 2024-12-12T01:21:04,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742349_1525 (size=111872) 2024-12-12T01:21:04,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742349_1525 (size=111872) 2024-12-12T01:21:04,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742349_1525 (size=111872) 2024-12-12T01:21:04,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742350_1526 (size=45609) 2024-12-12T01:21:04,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742350_1526 (size=45609) 2024-12-12T01:21:04,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742350_1526 (size=45609) 2024-12-12T01:21:04,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742351_1527 (size=136454) 2024-12-12T01:21:04,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742351_1527 (size=136454) 2024-12-12T01:21:04,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742351_1527 (size=136454) 2024-12-12T01:21:04,246 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-12T01:21:04,248 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-12T01:21:04,250 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-12T01:21:04,250 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-12T01:21:04,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742352_1528 (size=441) 2024-12-12T01:21:04,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742352_1528 (size=441) 2024-12-12T01:21:04,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742352_1528 (size=441) 2024-12-12T01:21:04,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742353_1529 (size=21) 2024-12-12T01:21:04,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742353_1529 (size=21) 2024-12-12T01:21:04,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742353_1529 (size=21) 2024-12-12T01:21:04,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742354_1530 (size=303998) 2024-12-12T01:21:04,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742354_1530 (size=303998) 2024-12-12T01:21:04,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742354_1530 (size=303998) 2024-12-12T01:21:05,210 DEBUG [master/36236b0d7605:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region db1338a217366846c95e41df7545534c changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:21:05,210 DEBUG [master/36236b0d7605:0.Chore.1 {}] 
balancer.RegionHDFSBlockLocationFinder(159): Locality for region ccca775fd0cd0e7c92be4d830616669d changed from -1.0 to 0.0, refreshing cache 2024-12-12T01:21:07,382 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:21:07,382 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:21:07,386 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0009_000001 (auth:SIMPLE) from 127.0.0.1:53626 2024-12-12T01:21:07,410 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0010_000001 (auth:SIMPLE) from 127.0.0.1:59558 2024-12-12T01:21:07,412 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_0/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000001/launch_container.sh] 2024-12-12T01:21:07,412 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_0/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000001/container_tokens] 2024-12-12T01:21:07,412 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_0/usercache/jenkins/appcache/application_1733966109028_0009/container_1733966109028_0009_01_000001/sysfs] 2024-12-12T01:21:07,850 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-12T01:21:07,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=76.94 KB heapSize=121.80 KB 2024-12-12T01:21:07,877 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/info/1602174d0d9a494eade10020a9b20b58 is 181, key is testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c./info:regioninfo/1733966414900/Put/seqid=0 2024-12-12T01:21:07,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742355_1531 (size=17550) 2024-12-12T01:21:07,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742355_1531 (size=17550) 2024-12-12T01:21:07,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742355_1531 (size=17550) 2024-12-12T01:21:07,894 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.78 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/info/1602174d0d9a494eade10020a9b20b58 2024-12-12T01:21:07,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/ns/51c89466b0bf45cbb234a6d57bd96fb0 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4./ns:/1733966386916/DeleteFamily/seqid=0 2024-12-12T01:21:07,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742356_1532 (size=7924) 2024-12-12T01:21:07,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742356_1532 (size=7924) 2024-12-12T01:21:07,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742356_1532 (size=7924) 2024-12-12T01:21:07,932 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.37 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/ns/51c89466b0bf45cbb234a6d57bd96fb0 2024-12-12T01:21:07,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/rep_barrier/8c2a5084780d4534a0645a389b2ac281 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4./rep_barrier:/1733966386916/DeleteFamily/seqid=0 2024-12-12T01:21:07,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742357_1533 (size=8195) 2024-12-12T01:21:07,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742357_1533 (size=8195) 2024-12-12T01:21:07,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42945 is added to blk_1073742357_1533 (size=8195) 2024-12-12T01:21:07,972 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.49 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/rep_barrier/8c2a5084780d4534a0645a389b2ac281 2024-12-12T01:21:07,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/table/927a375e8ec847ffa2e7bae5ce2818c1 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733966366432.fd36ddb471cee7fb43bda32dd22b1fd4./table:/1733966386916/DeleteFamily/seqid=0 2024-12-12T01:21:08,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742358_1534 (size=9051) 2024-12-12T01:21:08,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742358_1534 (size=9051) 2024-12-12T01:21:08,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742358_1534 (size=9051) 2024-12-12T01:21:08,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/table/927a375e8ec847ffa2e7bae5ce2818c1 2024-12-12T01:21:08,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/info/1602174d0d9a494eade10020a9b20b58 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/info/1602174d0d9a494eade10020a9b20b58 2024-12-12T01:21:08,017 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/info/1602174d0d9a494eade10020a9b20b58, entries=96, sequenceid=211, filesize=17.1 K 2024-12-12T01:21:08,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/ns/51c89466b0bf45cbb234a6d57bd96fb0 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/ns/51c89466b0bf45cbb234a6d57bd96fb0 2024-12-12T01:21:08,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/ns/51c89466b0bf45cbb234a6d57bd96fb0, entries=24, sequenceid=211, filesize=7.7 K 2024-12-12T01:21:08,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/rep_barrier/8c2a5084780d4534a0645a389b2ac281 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/rep_barrier/8c2a5084780d4534a0645a389b2ac281 2024-12-12T01:21:08,033 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/rep_barrier/8c2a5084780d4534a0645a389b2ac281, entries=22, sequenceid=211, filesize=8.0 K 2024-12-12T01:21:08,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/table/927a375e8ec847ffa2e7bae5ce2818c1 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/table/927a375e8ec847ffa2e7bae5ce2818c1 2024-12-12T01:21:08,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/table/927a375e8ec847ffa2e7bae5ce2818c1, entries=39, sequenceid=211, filesize=8.8 K 2024-12-12T01:21:08,039 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~76.94 KB/78789, heapSize ~121.74 KB/124664, currentSize=0 B/0 for 1588230740 in 188ms, sequenceid=211, compaction requested=false 2024-12-12T01:21:08,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-12T01:21:13,710 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0010_000001 (auth:SIMPLE) from 127.0.0.1:47926 2024-12-12T01:21:14,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742359_1535 (size=349696) 2024-12-12T01:21:14,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742359_1535 (size=349696) 2024-12-12T01:21:14,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742359_1535 (size=349696) 2024-12-12T01:21:16,445 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0010_000001 (auth:SIMPLE) from 127.0.0.1:55542 2024-12-12T01:21:16,472 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0010_000001 (auth:SIMPLE) from 127.0.0.1:38114 2024-12-12T01:21:19,503 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d644bbcb843c6264137c6748d170fd67, had cached 0 bytes from a total of 8324 2024-12-12T01:21:19,503 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e4e1b654a5a995b6e71de7ab606443fe, had cached 0 bytes from a total of 5288 2024-12-12T01:21:23,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742360_1536 (size=5286) 2024-12-12T01:21:23,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742360_1536 (size=5286) 2024-12-12T01:21:23,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742360_1536 (size=5286) 2024-12-12T01:21:23,331 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_1/usercache/jenkins/appcache/application_1733966109028_0010/container_1733966109028_0010_01_000003/launch_container.sh] 2024-12-12T01:21:23,331 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_1/usercache/jenkins/appcache/application_1733966109028_0010/container_1733966109028_0010_01_000003/container_tokens] 2024-12-12T01:21:23,332 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_1/usercache/jenkins/appcache/application_1733966109028_0010/container_1733966109028_0010_01_000003/sysfs] 2024-12-12T01:21:24,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742362_1538 (size=8324) 2024-12-12T01:21:24,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742362_1538 (size=8324) 2024-12-12T01:21:24,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742362_1538 (size=8324) 2024-12-12T01:21:24,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742361_1537 (size=22153) 2024-12-12T01:21:24,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742361_1537 (size=22153) 2024-12-12T01:21:24,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742361_1537 (size=22153) 2024-12-12T01:21:24,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742363_1539 (size=463) 2024-12-12T01:21:24,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742363_1539 (size=463) 2024-12-12T01:21:24,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742363_1539 (size=463) 2024-12-12T01:21:24,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742364_1540 (size=22153) 2024-12-12T01:21:24,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742364_1540 (size=22153) 2024-12-12T01:21:24,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742364_1540 (size=22153) 2024-12-12T01:21:24,488 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0010/container_1733966109028_0010_01_000002/launch_container.sh] 2024-12-12T01:21:24,488 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0010/container_1733966109028_0010_01_000002/container_tokens] 2024-12-12T01:21:24,488 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_1/usercache/jenkins/appcache/application_1733966109028_0010/container_1733966109028_0010_01_000002/sysfs] 2024-12-12T01:21:24,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742365_1541 (size=349696) 2024-12-12T01:21:24,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742365_1541 (size=349696) 2024-12-12T01:21:24,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742365_1541 (size=349696) 2024-12-12T01:21:24,505 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0010_000001 (auth:SIMPLE) from 127.0.0.1:47600 2024-12-12T01:21:26,352 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-12T01:21:26,354 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
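The ExportSnapshot lines above mark the end of the copy phase and the start of verification for snaptb0-testExportWithChecksum. For orientation, a minimal sketch of driving the same tool from Java follows; the snapshot name and destination URI are copied from this log, the flag spelling follows the HBase reference guide, and the mapper count is an arbitrary illustration, so this is not the exact invocation used by the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Snapshot name and destination are taken from the surrounding log; a real run
    // would point at its own cluster rather than this MiniDFSCluster on port 33353.
    String[] exportArgs = {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "hdfs://localhost:33353/user/jenkins/test-data/"
            + "2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966462742",
        "-mappers", "1" // arbitrary value for the sketch; the test's actual setting is not shown here
    };
    // ExportSnapshot is a Hadoop Tool, so ToolRunner handles the generic options.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), exportArgs);
    System.exit(rc);
  }
}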
2024-12-12T01:21:26,360 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-12T01:21:26,360 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-12T01:21:26,361 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-12T01:21:26,361 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-12T01:21:26,361 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-12T01:21:26,361 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-12T01:21:26,361 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966462742/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966462742/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-12T01:21:26,362 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966462742/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-12T01:21:26,362 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966462742/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-12T01:21:26,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-12T01:21:26,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-12T01:21:26,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-12T01:21:26,371 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966486371"}]},"ts":"1733966486371"} 2024-12-12T01:21:26,373 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-12T01:21:26,373 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-12T01:21:26,374 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-12T01:21:26,375 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ccca775fd0cd0e7c92be4d830616669d, UNASSIGN}, {pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=db1338a217366846c95e41df7545534c, UNASSIGN}] 2024-12-12T01:21:26,376 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=db1338a217366846c95e41df7545534c, UNASSIGN 2024-12-12T01:21:26,376 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ccca775fd0cd0e7c92be4d830616669d, UNASSIGN 2024-12-12T01:21:26,377 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=226 updating hbase:meta row=ccca775fd0cd0e7c92be4d830616669d, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:21:26,377 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=db1338a217366846c95e41df7545534c, regionState=CLOSING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:21:26,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ccca775fd0cd0e7c92be4d830616669d, UNASSIGN because future has completed 2024-12-12T01:21:26,379 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:21:26,379 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=226, state=RUNNABLE, hasLock=false; CloseRegionProcedure ccca775fd0cd0e7c92be4d830616669d, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:21:26,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=db1338a217366846c95e41df7545534c, UNASSIGN because future has completed 2024-12-12T01:21:26,381 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:21:26,381 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure db1338a217366846c95e41df7545534c, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:21:26,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-12T01:21:26,532 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] 
handler.UnassignRegionHandler(122): Close ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:21:26,532 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:21:26,532 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1722): Closing ccca775fd0cd0e7c92be4d830616669d, disabling compactions & flushes 2024-12-12T01:21:26,532 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:21:26,532 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:21:26,532 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. after waiting 0 ms 2024-12-12T01:21:26,533 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:21:26,534 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(122): Close db1338a217366846c95e41df7545534c 2024-12-12T01:21:26,534 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:21:26,534 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1722): Closing db1338a217366846c95e41df7545534c, disabling compactions & flushes 2024-12-12T01:21:26,534 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 2024-12-12T01:21:26,534 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 2024-12-12T01:21:26,535 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. after waiting 0 ms 2024-12-12T01:21:26,535 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 
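The pid=228 and pid=229 handlers above are the region-server side of the DisableTableProcedure stored as pid=224 a few entries earlier: each region is unassigned, compactions and flushes are disabled, and the close lock is taken. On the client side this cascade is normally triggered by a single blocking Admin call; the sketch below illustrates that call with the table name from this run, and is not an excerpt from the test itself.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // disableTable blocks until the master's DisableTableProcedure completes; the
      // repeated "Checking to see if procedure is done pid=224" lines are the master
      // answering the client's completion polling for exactly this kind of call.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
    }
  }
}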
2024-12-12T01:21:26,541 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:21:26,542 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:21:26,542 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:21:26,542 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:21:26,542 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c. 2024-12-12T01:21:26,542 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d. 2024-12-12T01:21:26,542 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1676): Region close journal for db1338a217366846c95e41df7545534c: Waiting for close lock at 1733966486534Running coprocessor pre-close hooks at 1733966486534Disabling compacts and flushes for region at 1733966486534Disabling writes for close at 1733966486535 (+1 ms)Writing region close event to WAL at 1733966486535Running coprocessor post-close hooks at 1733966486542 (+7 ms)Closed at 1733966486542 2024-12-12T01:21:26,542 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1676): Region close journal for ccca775fd0cd0e7c92be4d830616669d: Waiting for close lock at 1733966486532Running coprocessor pre-close hooks at 1733966486532Disabling compacts and flushes for region at 1733966486532Disabling writes for close at 1733966486532Writing region close event to WAL at 1733966486534 (+2 ms)Running coprocessor post-close hooks at 1733966486542 (+8 ms)Closed at 1733966486542 2024-12-12T01:21:26,544 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(157): Closed ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:21:26,545 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=226 updating hbase:meta row=ccca775fd0cd0e7c92be4d830616669d, regionState=CLOSED 2024-12-12T01:21:26,545 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(157): Closed db1338a217366846c95e41df7545534c 2024-12-12T01:21:26,545 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=db1338a217366846c95e41df7545534c, regionState=CLOSED 2024-12-12T01:21:26,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): 
Going to wake up procedure pid=228, ppid=226, state=RUNNABLE, hasLock=false; CloseRegionProcedure ccca775fd0cd0e7c92be4d830616669d, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:21:26,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure db1338a217366846c95e41df7545534c, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:21:26,549 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=226 2024-12-12T01:21:26,549 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=226, state=SUCCESS, hasLock=false; CloseRegionProcedure ccca775fd0cd0e7c92be4d830616669d, server=36236b0d7605,38295,1733966101221 in 168 msec 2024-12-12T01:21:26,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=227 2024-12-12T01:21:26,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=227, state=SUCCESS, hasLock=false; CloseRegionProcedure db1338a217366846c95e41df7545534c, server=36236b0d7605,33883,1733966101479 in 167 msec 2024-12-12T01:21:26,553 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=225, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ccca775fd0cd0e7c92be4d830616669d, UNASSIGN in 174 msec 2024-12-12T01:21:26,554 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=227, resume processing ppid=225 2024-12-12T01:21:26,554 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, ppid=225, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=db1338a217366846c95e41df7545534c, UNASSIGN in 177 msec 2024-12-12T01:21:26,556 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-12-12T01:21:26,556 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 181 msec 2024-12-12T01:21:26,557 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966486557"}]},"ts":"1733966486557"} 2024-12-12T01:21:26,559 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-12T01:21:26,559 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-12T01:21:26,562 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 192 msec 2024-12-12T01:21:26,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-12T01:21:26,685 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-12T01:21:26,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$5(2570): 
Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-12T01:21:26,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=230, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-12T01:21:26,687 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=230, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-12T01:21:26,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-12T01:21:26,688 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=230, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-12T01:21:26,690 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-12T01:21:26,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T01:21:26,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T01:21:26,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T01:21:26,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T01:21:26,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-12T01:21:26,695 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-12T01:21:26,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T01:21:26,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:21:26,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:21:26,696 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:21:26,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-12T01:21:26,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:21:26,696 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-12-12T01:21:26,696 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-12-12T01:21:26,696 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-12T01:21:26,696 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-12T01:21:26,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-12T01:21:26,697 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:26,697 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:26,697 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:26,698 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:26,705 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:21:26,705 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c 2024-12-12T01:21:26,707 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf, FileablePath, 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/recovered.edits] 2024-12-12T01:21:26,707 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/recovered.edits] 2024-12-12T01:21:26,711 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf/73c03e8cb1b24a43aec401517fc362e2 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/cf/73c03e8cb1b24a43aec401517fc362e2 2024-12-12T01:21:26,714 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf/2827a2cc09464cc4913d7007c6acafcb to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/cf/2827a2cc09464cc4913d7007c6acafcb 2024-12-12T01:21:26,714 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d/recovered.edits/9.seqid 2024-12-12T01:21:26,715 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/ccca775fd0cd0e7c92be4d830616669d 2024-12-12T01:21:26,717 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c/recovered.edits/9.seqid 2024-12-12T01:21:26,718 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportWithChecksum/db1338a217366846c95e41df7545534c 2024-12-12T01:21:26,718 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-12T01:21:26,720 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=230, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-12T01:21:26,727 WARN [PEWorker-1 {}] 
procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-12T01:21:26,731 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-12T01:21:26,732 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=230, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-12T01:21:26,732 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-12T01:21:26,733 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966486732"}]},"ts":"9223372036854775807"} 2024-12-12T01:21:26,733 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966486732"}]},"ts":"9223372036854775807"} 2024-12-12T01:21:26,735 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-12T01:21:26,735 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ccca775fd0cd0e7c92be4d830616669d, NAME => 'testtb-testExportWithChecksum,,1733966414539.ccca775fd0cd0e7c92be4d830616669d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => db1338a217366846c95e41df7545534c, NAME => 'testtb-testExportWithChecksum,1,1733966414539.db1338a217366846c95e41df7545534c.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T01:21:26,736 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithChecksum' as deleted. 
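Archiving the region directories and deleting the META rows above are internal steps of DeleteTableProcedure pid=230; the matching client-side calls, including the two snapshot deletions recorded just below, would look roughly like the sketch that follows. It assumes the table is already disabled (as it is at this point in the log) and reuses the table and snapshot names from this run.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableAndSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // deleteTable requires a disabled table and drives the DeleteTableProcedure
      // (archive region dirs, remove META rows, drop the table descriptor) seen above.
      admin.deleteTable(table);
      // The master log a few entries below shows both snapshots being removed afterwards.
      admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
      admin.deleteSnapshot("snaptb0-testExportWithChecksum");
    }
  }
}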
2024-12-12T01:21:26,736 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966486736"}]},"ts":"9223372036854775807"} 2024-12-12T01:21:26,738 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-12T01:21:26,739 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=230, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-12T01:21:26,740 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 54 msec 2024-12-12T01:21:26,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-12T01:21:26,805 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-12-12T01:21:26,805 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-12T01:21:26,812 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-12T01:21:26,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-12T01:21:26,815 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-12-12T01:21:26,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-12T01:21:26,847 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=811 (was 809) Potentially hanging thread: HFileArchiver-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:49876 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:45461 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1620980124_1 at /127.0.0.1:49850 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 18808) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1234011679_22 at /127.0.0.1:40200 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:56180 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-8176 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45461 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) 
app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) - Thread LEAK? -, OpenFileDescriptor=782 (was 790), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=939 (was 590) - SystemLoadAverage LEAK? -, ProcessCount=21 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=5194 (was 5703) 2024-12-12T01:21:26,847 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-12T01:21:26,873 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=811, OpenFileDescriptor=782, MaxFileDescriptor=1048576, SystemLoadAverage=939, ProcessCount=21, AvailableMemoryMB=5193 2024-12-12T01:21:26,873 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-12T01:21:26,875 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T01:21:26,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=231, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:26,878 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T01:21:26,878 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:21:26,878 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: 
"testtb-testExportFileSystemStateWithSkipTmp" procId is: 231 2024-12-12T01:21:26,879 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T01:21:26,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-12T01:21:26,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742366_1542 (size=418) 2024-12-12T01:21:26,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742366_1542 (size=418) 2024-12-12T01:21:26,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742366_1542 (size=418) 2024-12-12T01:21:26,892 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f05b6a016511c5fd533c5f218e73e8d2, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:21:26,892 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 3d5c7f0d35e469d819acec02f89cfa9b, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:21:26,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742368_1544 (size=79) 2024-12-12T01:21:26,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742367_1543 (size=79) 2024-12-12T01:21:26,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742368_1544 (size=79) 2024-12-12T01:21:26,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742368_1544 (size=79) 2024-12-12T01:21:26,906 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742367_1543 (size=79) 2024-12-12T01:21:26,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742367_1543 (size=79) 2024-12-12T01:21:26,906 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:21:26,906 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing f05b6a016511c5fd533c5f218e73e8d2, disabling compactions & flushes 2024-12-12T01:21:26,906 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 2024-12-12T01:21:26,906 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 2024-12-12T01:21:26,906 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. after waiting 0 ms 2024-12-12T01:21:26,906 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 2024-12-12T01:21:26,906 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 2024-12-12T01:21:26,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for f05b6a016511c5fd533c5f218e73e8d2: Waiting for close lock at 1733966486906Disabling compacts and flushes for region at 1733966486906Disabling writes for close at 1733966486906Writing region close event to WAL at 1733966486906Closed at 1733966486906 2024-12-12T01:21:26,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:21:26,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 3d5c7f0d35e469d819acec02f89cfa9b, disabling compactions & flushes 2024-12-12T01:21:26,907 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 
2024-12-12T01:21:26,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 2024-12-12T01:21:26,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. after waiting 0 ms 2024-12-12T01:21:26,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 2024-12-12T01:21:26,907 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 2024-12-12T01:21:26,907 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3d5c7f0d35e469d819acec02f89cfa9b: Waiting for close lock at 1733966486907Disabling compacts and flushes for region at 1733966486907Disabling writes for close at 1733966486907Writing region close event to WAL at 1733966486907Closed at 1733966486907 2024-12-12T01:21:26,908 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T01:21:26,909 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733966486908"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966486908"}]},"ts":"1733966486908"} 2024-12-12T01:21:26,909 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733966486908"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733966486908"}]},"ts":"1733966486908"} 2024-12-12T01:21:26,911 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-12T01:21:26,913 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T01:21:26,913 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966486913"}]},"ts":"1733966486913"} 2024-12-12T01:21:26,915 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-12T01:21:26,915 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {36236b0d7605=0} racks are {/default-rack=0} 2024-12-12T01:21:26,916 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-12T01:21:26,916 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-12T01:21:26,916 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-12T01:21:26,916 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-12T01:21:26,916 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-12T01:21:26,916 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-12T01:21:26,916 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-12T01:21:26,916 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-12T01:21:26,916 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-12T01:21:26,916 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-12T01:21:26,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f05b6a016511c5fd533c5f218e73e8d2, ASSIGN}, {pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3d5c7f0d35e469d819acec02f89cfa9b, ASSIGN}] 2024-12-12T01:21:26,918 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f05b6a016511c5fd533c5f218e73e8d2, ASSIGN 2024-12-12T01:21:26,918 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3d5c7f0d35e469d819acec02f89cfa9b, ASSIGN 2024-12-12T01:21:26,919 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f05b6a016511c5fd533c5f218e73e8d2, ASSIGN; state=OFFLINE, location=36236b0d7605,33883,1733966101479; forceNewPlan=false, retain=false 2024-12-12T01:21:26,919 INFO 
[PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3d5c7f0d35e469d819acec02f89cfa9b, ASSIGN; state=OFFLINE, location=36236b0d7605,38295,1733966101221; forceNewPlan=false, retain=false 2024-12-12T01:21:26,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-12T01:21:27,069 INFO [36236b0d7605:36083 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-12T01:21:27,069 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=f05b6a016511c5fd533c5f218e73e8d2, regionState=OPENING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:21:27,069 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=3d5c7f0d35e469d819acec02f89cfa9b, regionState=OPENING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:21:27,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3d5c7f0d35e469d819acec02f89cfa9b, ASSIGN because future has completed 2024-12-12T01:21:27,072 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=234, ppid=233, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:21:27,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f05b6a016511c5fd533c5f218e73e8d2, ASSIGN because future has completed 2024-12-12T01:21:27,073 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=232, state=RUNNABLE, hasLock=false; OpenRegionProcedure f05b6a016511c5fd533c5f218e73e8d2, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:21:27,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-12T01:21:27,226 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 2024-12-12T01:21:27,227 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 
2024-12-12T01:21:27,227 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7752): Opening region: {ENCODED => f05b6a016511c5fd533c5f218e73e8d2, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2.', STARTKEY => '', ENDKEY => '1'} 2024-12-12T01:21:27,227 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7752): Opening region: {ENCODED => 3d5c7f0d35e469d819acec02f89cfa9b, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b.', STARTKEY => '1', ENDKEY => ''} 2024-12-12T01:21:27,227 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. service=AccessControlService 2024-12-12T01:21:27,227 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. service=AccessControlService 2024-12-12T01:21:27,227 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-12T01:21:27,227 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-12T01:21:27,227 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,227 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,228 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:21:27,228 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T01:21:27,228 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7794): checking encryption for f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,228 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7794): checking encryption for 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,228 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7797): checking classloading for 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,228 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7797): checking classloading for f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,229 INFO [StoreOpener-f05b6a016511c5fd533c5f218e73e8d2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,229 INFO [StoreOpener-3d5c7f0d35e469d819acec02f89cfa9b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,230 INFO [StoreOpener-3d5c7f0d35e469d819acec02f89cfa9b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3d5c7f0d35e469d819acec02f89cfa9b columnFamilyName cf 2024-12-12T01:21:27,230 INFO [StoreOpener-f05b6a016511c5fd533c5f218e73e8d2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f05b6a016511c5fd533c5f218e73e8d2 columnFamilyName cf 2024-12-12T01:21:27,230 DEBUG [StoreOpener-f05b6a016511c5fd533c5f218e73e8d2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:21:27,230 DEBUG [StoreOpener-3d5c7f0d35e469d819acec02f89cfa9b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T01:21:27,231 INFO [StoreOpener-f05b6a016511c5fd533c5f218e73e8d2-1 {}] regionserver.HStore(327): Store=f05b6a016511c5fd533c5f218e73e8d2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:21:27,231 INFO [StoreOpener-3d5c7f0d35e469d819acec02f89cfa9b-1 {}] regionserver.HStore(327): Store=3d5c7f0d35e469d819acec02f89cfa9b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T01:21:27,231 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1038): replaying wal for f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,231 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1038): replaying wal for 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,231 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,232 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,232 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,232 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 
{event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,232 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1048): stopping wal replay for f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,232 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1060): Cleaning up temporary data for f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,232 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1048): stopping wal replay for 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,232 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1060): Cleaning up temporary data for 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,233 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1093): writing seq id for f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,234 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1093): writing seq id for 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,235 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:21:27,235 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T01:21:27,236 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1114): Opened 3d5c7f0d35e469d819acec02f89cfa9b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71557729, jitterRate=0.06629325449466705}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:21:27,236 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1114): Opened f05b6a016511c5fd533c5f218e73e8d2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71775192, jitterRate=0.06953370571136475}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T01:21:27,236 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,236 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f05b6a016511c5fd533c5f218e73e8d2 
2024-12-12T01:21:27,236 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1006): Region open journal for f05b6a016511c5fd533c5f218e73e8d2: Running coprocessor pre-open hook at 1733966487228Writing region info on filesystem at 1733966487228Initializing all the Stores at 1733966487228Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966487229 (+1 ms)Cleaning up temporary data from old regions at 1733966487232 (+3 ms)Running coprocessor post-open hooks at 1733966487236 (+4 ms)Region opened successfully at 1733966487236 2024-12-12T01:21:27,236 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1006): Region open journal for 3d5c7f0d35e469d819acec02f89cfa9b: Running coprocessor pre-open hook at 1733966487228Writing region info on filesystem at 1733966487228Initializing all the Stores at 1733966487228Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733966487229 (+1 ms)Cleaning up temporary data from old regions at 1733966487232 (+3 ms)Running coprocessor post-open hooks at 1733966487236 (+4 ms)Region opened successfully at 1733966487236 2024-12-12T01:21:27,237 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2., pid=235, masterSystemTime=1733966487224 2024-12-12T01:21:27,237 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b., pid=234, masterSystemTime=1733966487224 2024-12-12T01:21:27,239 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 2024-12-12T01:21:27,239 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 2024-12-12T01:21:27,239 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=3d5c7f0d35e469d819acec02f89cfa9b, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:21:27,239 DEBUG [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 
2024-12-12T01:21:27,239 INFO [RS_OPEN_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 2024-12-12T01:21:27,240 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=f05b6a016511c5fd533c5f218e73e8d2, regionState=OPEN, openSeqNum=2, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:21:27,241 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=234, ppid=233, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:21:27,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=232, state=RUNNABLE, hasLock=false; OpenRegionProcedure f05b6a016511c5fd533c5f218e73e8d2, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:21:27,242 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=36236b0d7605,33883,1733966101479, table=testtb-testExportFileSystemStateWithSkipTmp, region=f05b6a016511c5fd533c5f218e73e8d2. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-12T01:21:27,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=234, resume processing ppid=233 2024-12-12T01:21:27,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, ppid=233, state=SUCCESS, hasLock=false; OpenRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b, server=36236b0d7605,38295,1733966101221 in 170 msec 2024-12-12T01:21:27,245 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=232 2024-12-12T01:21:27,245 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3d5c7f0d35e469d819acec02f89cfa9b, ASSIGN in 327 msec 2024-12-12T01:21:27,245 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=232, state=SUCCESS, hasLock=false; OpenRegionProcedure f05b6a016511c5fd533c5f218e73e8d2, server=36236b0d7605,33883,1733966101479 in 170 msec 2024-12-12T01:21:27,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=231 2024-12-12T01:21:27,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f05b6a016511c5fd533c5f218e73e8d2, ASSIGN in 328 msec 2024-12-12T01:21:27,247 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T01:21:27,247 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966487247"}]},"ts":"1733966487247"} 2024-12-12T01:21:27,249 
INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-12T01:21:27,249 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T01:21:27,250 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-12T01:21:27,252 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-12T01:21:27,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:21:27,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:21:27,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:21:27,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:21:27,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:27,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:27,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:27,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:27,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:27,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data 
PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:27,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:27,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:27,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 381 msec 2024-12-12T01:21:27,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-12T01:21:27,505 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-12T01:21:27,505 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-12T01:21:27,505 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:21:27,511 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-12T01:21:27,511 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:21:27,511 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-12T01:21:27,511 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-12T01:21:27,515 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-12T01:21:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966487515 (current time:1733966487515). 
2024-12-12T01:21:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:21:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-12T01:21:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:21:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70a429df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:21:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:21:27,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:21:27,517 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:21:27,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:21:27,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:21:27,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8ea1b9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:21:27,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:21:27,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:21:27,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:21:27,519 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56502, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:21:27,519 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b0478ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:21:27,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:21:27,520 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:21:27,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:21:27,521 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41518, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:21:27,529 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:21:27,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:21:27,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:21:27,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:21:27,530 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-12T01:21:27,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c545967, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:21:27,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:21:27,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:21:27,531 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:21:27,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:21:27,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:21:27,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46bbdfc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:21:27,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:21:27,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:21:27,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:21:27,533 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56514, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:21:27,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c38685, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:21:27,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:21:27,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:21:27,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:21:27,535 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41534, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-12T01:21:27,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:21:27,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 2024-12-12T01:21:27,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:21:27,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:21:27,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:21:27,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-12T01:21:27,541 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:21:27,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-12T01:21:27,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=236, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-12T01:21:27,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 236 2024-12-12T01:21:27,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-12T01:21:27,544 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:21:27,545 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:21:27,547 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:21:27,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742369_1545 (size=203) 2024-12-12T01:21:27,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742369_1545 (size=203) 2024-12-12T01:21:27,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742369_1545 (size=203) 2024-12-12T01:21:27,553 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:21:27,554 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f05b6a016511c5fd533c5f218e73e8d2}, {pid=238, ppid=236, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b}] 2024-12-12T01:21:27,554 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,554 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-12T01:21:27,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=237 2024-12-12T01:21:27,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=238 2024-12-12T01:21:27,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 2024-12-12T01:21:27,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 2024-12-12T01:21:27,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.HRegion(2603): Flush status journal for f05b6a016511c5fd533c5f218e73e8d2: 2024-12-12T01:21:27,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.HRegion(2603): Flush status journal for 3d5c7f0d35e469d819acec02f89cfa9b: 2024-12-12T01:21:27,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-12T01:21:27,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-12T01:21:27,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:27,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:27,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:21:27,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:21:27,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:21:27,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-12T01:21:27,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742370_1546 (size=82) 2024-12-12T01:21:27,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742370_1546 (size=82) 2024-12-12T01:21:27,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742370_1546 (size=82) 2024-12-12T01:21:27,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 2024-12-12T01:21:27,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=238 2024-12-12T01:21:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=238 2024-12-12T01:21:27,724 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,725 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=236, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b in 172 msec 2024-12-12T01:21:27,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742371_1547 (size=82) 2024-12-12T01:21:27,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742371_1547 (size=82) 2024-12-12T01:21:27,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742371_1547 (size=82) 2024-12-12T01:21:27,730 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 
2024-12-12T01:21:27,731 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=237 2024-12-12T01:21:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=237 2024-12-12T01:21:27,731 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,731 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:27,734 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=237, resume processing ppid=236 2024-12-12T01:21:27,734 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, ppid=236, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f05b6a016511c5fd533c5f218e73e8d2 in 178 msec 2024-12-12T01:21:27,734 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:21:27,735 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:21:27,735 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:21:27,735 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:27,736 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:27,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742372_1548 (size=585) 2024-12-12T01:21:27,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742372_1548 (size=585) 2024-12-12T01:21:27,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742372_1548 (size=585) 2024-12-12T01:21:27,747 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:21:27,754 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:21:27,754 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:27,756 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:21:27,756 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 236 2024-12-12T01:21:27,758 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 214 msec 2024-12-12T01:21:27,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-12T01:21:27,865 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-12T01:21:27,869 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='0c1d798764f7962f9418806bab259fdcb', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2., hostname=36236b0d7605,33883,1733966101479, seqNum=2] 2024-12-12T01:21:27,870 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='19ae74254cfb908666efe2b4b19cb4f4e', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:21:27,871 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='28a533590c93e1defdafa5f59e95ced1c', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 
2024-12-12T01:21:27,872 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='349db10d6d3ebff62d6daa5b89bfa07c0', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:21:27,873 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='4ca3a9262dc6892b1abad9ff172c84041', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:21:27,873 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='5a342bdd162e997c8615a8db8472594a7', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:21:27,874 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='6d97ecc5b22f3f40f873f8eade4c316fe', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b., hostname=36236b0d7605,38295,1733966101221, seqNum=2] 2024-12-12T01:21:27,876 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33883 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:21:27,878 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38295 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-12T01:21:27,879 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-12T01:21:27,881 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:27,881 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 
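[Editor's note, not part of the captured log] The master-side entries above — MasterRpcServices "snapshot request for:{ ss=emptySnaptb0-... type=FLUSH ttl=0 }", the SnapshotDescriptionUtils validation, and the SnapshotProcedure pid=236 — are what a client-issued, FLUSH-type snapshot looks like from the server, and the same sequence repeats just below for snaptb0-testExportFileSystemStateWithSkipTmp (pid=239). The log does not show the client code itself; the following is only a minimal sketch, assuming the standard public HBase Admin API and a default client configuration, of the kind of call that produces these entries. The class name and setup are illustrative, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml for the target cluster is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // For an enabled table this takes a FLUSH-type snapshot, matching the
      // "type=FLUSH ttl=0" SnapshotDescription the master logs above.
      admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
    }
  }
}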
2024-12-12T01:21:27,881 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-12T01:21:27,883 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-12T01:21:27,889 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-12T01:21:27,899 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-12T01:21:27,902 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-12T01:21:27,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733966487902 (current time:1733966487902). 2024-12-12T01:21:27,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-12T01:21:27,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-12T01:21:27,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-12T01:21:27,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13ee4087, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:21:27,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:21:27,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:21:27,905 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:21:27,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:21:27,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:21:27,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@599d2b1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:21:27,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:21:27,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:21:27,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:21:27,907 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56526, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:21:27,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23f095f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:21:27,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:21:27,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:21:27,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:21:27,910 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41546, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:21:27,912 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 
2024-12-12T01:21:27,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:21:27,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:21:27,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:21:27,912 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:21:27,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cc43196, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:21:27,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ClusterIdFetcher(90): Going to request 36236b0d7605,36083,-1 for getting cluster id 2024-12-12T01:21:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-12T01:21:27,914 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c7aab131-a6d9-4901-b39f-42031992260b' 2024-12-12T01:21:27,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-12T01:21:27,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c7aab131-a6d9-4901-b39f-42031992260b" 2024-12-12T01:21:27,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@772d1291, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:21:27,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [36236b0d7605,36083,-1] 2024-12-12T01:21:27,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-12T01:21:27,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:21:27,917 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56542, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-12T01:21:27,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17af4ebf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T01:21:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-12T01:21:27,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=36236b0d7605,44743,1733966101408, seqNum=-1] 2024-12-12T01:21:27,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T01:21:27,921 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41548, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T01:21:27,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., hostname=36236b0d7605,44743,1733966101408, seqNum=2] 2024-12-12T01:21:27,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083. 
2024-12-12T01:21:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor231.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-12T01:21:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:21:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:21:27,925 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-12T01:21:27,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-12T01:21:27,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-12T01:21:27,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-12T01:21:27,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-12T01:21:27,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-12T01:21:27,928 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-12T01:21:27,929 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-12T01:21:27,932 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-12T01:21:27,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742373_1549 (size=198) 2024-12-12T01:21:27,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742373_1549 (size=198) 2024-12-12T01:21:27,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742373_1549 (size=198) 2024-12-12T01:21:27,967 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-12T01:21:27,968 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f05b6a016511c5fd533c5f218e73e8d2}, {pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b}] 2024-12-12T01:21:27,970 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:27,970 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:28,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-12T01:21:28,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33883 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=240 2024-12-12T01:21:28,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 2024-12-12T01:21:28,122 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2902): Flushing f05b6a016511c5fd533c5f218e73e8d2 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-12T01:21:28,123 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38295 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=241 2024-12-12T01:21:28,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 2024-12-12T01:21:28,123 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2902): Flushing 3d5c7f0d35e469d819acec02f89cfa9b 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-12T01:21:28,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/.tmp/cf/e84b3c3245a34a769fb34af5490df5d3 is 71, key is 0403229d541179a33c4934ce443ce182/cf:q/1733966487876/Put/seqid=0 2024-12-12T01:21:28,139 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/.tmp/cf/51da632947184898a6b9b8a1a39b9988 is 71, key is 10385d73751381b5fc34ca7f3031c4e2/cf:q/1733966487878/Put/seqid=0 2024-12-12T01:21:28,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742374_1550 (size=5424) 2024-12-12T01:21:28,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742374_1550 (size=5424) 2024-12-12T01:21:28,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742374_1550 (size=5424) 2024-12-12T01:21:28,151 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/.tmp/cf/e84b3c3245a34a769fb34af5490df5d3 2024-12-12T01:21:28,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742375_1551 (size=8188) 2024-12-12T01:21:28,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742375_1551 (size=8188) 2024-12-12T01:21:28,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742375_1551 (size=8188) 2024-12-12T01:21:28,155 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/.tmp/cf/51da632947184898a6b9b8a1a39b9988 2024-12-12T01:21:28,156 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/.tmp/cf/e84b3c3245a34a769fb34af5490df5d3 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/cf/e84b3c3245a34a769fb34af5490df5d3 2024-12-12T01:21:28,160 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/.tmp/cf/51da632947184898a6b9b8a1a39b9988 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/cf/51da632947184898a6b9b8a1a39b9988 2024-12-12T01:21:28,162 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/cf/e84b3c3245a34a769fb34af5490df5d3, entries=5, sequenceid=6, filesize=5.3 K 2024-12-12T01:21:28,163 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for f05b6a016511c5fd533c5f218e73e8d2 in 41ms, sequenceid=6, compaction requested=false 2024-12-12T01:21:28,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-12T01:21:28,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] 
regionserver.HRegion(2603): Flush status journal for f05b6a016511c5fd533c5f218e73e8d2: 2024-12-12T01:21:28,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-12T01:21:28,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:28,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:21:28,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/cf/e84b3c3245a34a769fb34af5490df5d3] hfiles 2024-12-12T01:21:28,164 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/cf/e84b3c3245a34a769fb34af5490df5d3 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:28,168 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/cf/51da632947184898a6b9b8a1a39b9988, entries=45, sequenceid=6, filesize=8.0 K 2024-12-12T01:21:28,169 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 3d5c7f0d35e469d819acec02f89cfa9b in 46ms, sequenceid=6, compaction requested=false 2024-12-12T01:21:28,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2603): Flush status journal for 3d5c7f0d35e469d819acec02f89cfa9b: 2024-12-12T01:21:28,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-12T01:21:28,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:28,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-12T01:21:28,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/cf/51da632947184898a6b9b8a1a39b9988] hfiles 2024-12-12T01:21:28,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/cf/51da632947184898a6b9b8a1a39b9988 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:28,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742376_1552 (size=121) 2024-12-12T01:21:28,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742376_1552 (size=121) 2024-12-12T01:21:28,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742376_1552 (size=121) 2024-12-12T01:21:28,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 
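[Editor's note, not part of the captured log] At this point the per-region SnapshotRegionProcedures for pid=239 are finishing and the master will consolidate the manifest, as it did for pid=236 above before reporting "Operation: SNAPSHOT ... completed" to the client. The log does not show how the caller verifies the result; the sketch below is an assumed, minimal way to list the matching snapshots through the public Admin API once the procedures finish. The class name and regex are illustrative only.

import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml for the target cluster is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Both emptySnaptb0-... and snaptb0-... should be listed once their
      // SnapshotProcedures (pid=236 and pid=239 in the log) have completed.
      for (SnapshotDescription sd
          : admin.listSnapshots(Pattern.compile(".*-testExportFileSystemStateWithSkipTmp"))) {
        System.out.println(sd.getName() + " table=" + sd.getTableName());
      }
    }
  }
}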
2024-12-12T01:21:28,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=240 2024-12-12T01:21:28,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=240 2024-12-12T01:21:28,176 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:28,176 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:28,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f05b6a016511c5fd533c5f218e73e8d2 in 211 msec 2024-12-12T01:21:28,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742377_1553 (size=121) 2024-12-12T01:21:28,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742377_1553 (size=121) 2024-12-12T01:21:28,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742377_1553 (size=121) 2024-12-12T01:21:28,196 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 
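
Each region worker reports back through RemoteProcedureResultReporter, the master logs "Remote procedure done", and the per-region SnapshotRegionProcedure is marked SUCCESS. From the client's point of view the observable effect is that the snapshot eventually appears in the master's snapshot list; a small post-condition check, assuming the standard Admin API (a sketch, not code from this test):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    final class SnapshotChecks {
      // True once the master lists a completed snapshot with the given name.
      static boolean snapshotExists(Admin admin, String name) throws IOException {
        for (SnapshotDescription sd : admin.listSnapshots()) {
          if (name.equals(sd.getName())) {
            return true;
          }
        }
        return false;
      }
    }
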
2024-12-12T01:21:28,197 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/36236b0d7605:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=241 2024-12-12T01:21:28,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster(4169): Remote procedure done, pid=241 2024-12-12T01:21:28,197 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:28,197 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:28,200 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=241, resume processing ppid=239 2024-12-12T01:21:28,200 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b in 231 msec 2024-12-12T01:21:28,200 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-12T01:21:28,201 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-12T01:21:28,202 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-12T01:21:28,202 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:28,202 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:28,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742378_1554 (size=663) 2024-12-12T01:21:28,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742378_1554 (size=663) 2024-12-12T01:21:28,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742378_1554 (size=663) 2024-12-12T01:21:28,232 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-12T01:21:28,237 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-12T01:21:28,237 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:28,238 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-12T01:21:28,238 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-12T01:21:28,240 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 313 msec 2024-12-12T01:21:28,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-12T01:21:28,245 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-12T01:21:28,245 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966488245 2024-12-12T01:21:28,245 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33353, tgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966488245, rawTgtDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966488245, srcFsUri=hdfs://localhost:33353, srcDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:21:28,291 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33353, inputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221 2024-12-12T01:21:28,291 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966488245, skipTmp=true, 
initialOutputSnapshotDir=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966488245/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:28,293 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-12T01:21:28,297 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966488245/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:28,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742380_1556 (size=663) 2024-12-12T01:21:28,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742380_1556 (size=663) 2024-12-12T01:21:28,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742380_1556 (size=663) 2024-12-12T01:21:28,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742379_1555 (size=198) 2024-12-12T01:21:28,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742379_1555 (size=198) 2024-12-12T01:21:28,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742379_1555 (size=198) 2024-12-12T01:21:28,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:28,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:28,721 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:28,913 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
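
ExportSnapshot(1094/1095) above records the export parameters: source and target are the same mini-cluster HDFS, skipTmp=true, and the initial output directory is the final .hbase-snapshot location rather than a .tmp staging area. A sketch of launching such an export, assuming the ExportSnapshot tool's -snapshot/-copy-to options; the snapshot.export.skiptmp property name is an assumption, while the paths are copied from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // skipTmp=true in the log: copy straight into the destination
        // .hbase-snapshot directory instead of a .tmp staging directory
        // (assumed property name for that switch).
        conf.setBoolean("snapshot.export.skiptmp", true);
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to",
            "hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966488245"
        });
        System.exit(rc);
      }
    }

Skipping the .tmp staging area means an interrupted export can leave a partial snapshot directory at the destination, which is why the tool re-verifies the exported snapshot's integrity at the end (see the "Verify the exported snapshot's expiration status and integrity" entry further down).
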
2024-12-12T01:21:29,877 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-11834611441091760775.jar 2024-12-12T01:21:29,877 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:29,878 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:29,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop-2908234615386768875.jar 2024-12-12T01:21:29,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:29,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:29,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:29,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:29,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:29,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-12T01:21:29,974 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-12T01:21:29,974 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-12T01:21:29,974 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-12T01:21:29,975 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-12T01:21:29,975 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-12T01:21:29,975 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-12T01:21:29,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-12T01:21:29,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-12T01:21:29,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-12T01:21:29,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-12T01:21:29,977 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-12T01:21:29,977 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:21:29,978 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:21:29,978 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:21:29,978 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:21:29,979 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-12T01:21:29,979 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:21:29,979 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-12T01:21:30,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742381_1557 (size=131440) 2024-12-12T01:21:30,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742381_1557 (size=131440) 2024-12-12T01:21:30,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742381_1557 (size=131440) 2024-12-12T01:21:30,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742382_1558 (size=4188619) 2024-12-12T01:21:30,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742382_1558 (size=4188619) 2024-12-12T01:21:30,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742382_1558 (size=4188619) 2024-12-12T01:21:30,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742383_1559 (size=1323991) 2024-12-12T01:21:30,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742383_1559 (size=1323991) 2024-12-12T01:21:30,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742383_1559 (size=1323991) 2024-12-12T01:21:30,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742384_1560 (size=903541) 2024-12-12T01:21:30,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742384_1560 (size=903541) 2024-12-12T01:21:30,109 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742384_1560 (size=903541) 2024-12-12T01:21:30,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742385_1561 (size=8360360) 2024-12-12T01:21:30,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742385_1561 (size=8360360) 2024-12-12T01:21:30,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742385_1561 (size=8360360) 2024-12-12T01:21:30,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742386_1562 (size=1877034) 2024-12-12T01:21:30,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742386_1562 (size=1877034) 2024-12-12T01:21:30,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742386_1562 (size=1877034) 2024-12-12T01:21:30,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742387_1563 (size=77835) 2024-12-12T01:21:30,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742387_1563 (size=77835) 2024-12-12T01:21:30,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742387_1563 (size=77835) 2024-12-12T01:21:30,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742388_1564 (size=30949) 2024-12-12T01:21:30,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742388_1564 (size=30949) 2024-12-12T01:21:30,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742388_1564 (size=30949) 2024-12-12T01:21:30,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742389_1565 (size=1597213) 2024-12-12T01:21:30,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742389_1565 (size=1597213) 2024-12-12T01:21:30,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742389_1565 (size=1597213) 2024-12-12T01:21:30,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742390_1566 (size=4695811) 2024-12-12T01:21:30,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742390_1566 (size=4695811) 2024-12-12T01:21:30,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742390_1566 (size=4695811) 2024-12-12T01:21:30,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742391_1567 (size=443172) 2024-12-12T01:21:30,334 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742391_1567 (size=443172) 2024-12-12T01:21:30,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742391_1567 (size=443172) 2024-12-12T01:21:30,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742392_1568 (size=232957) 2024-12-12T01:21:30,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742392_1568 (size=232957) 2024-12-12T01:21:30,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742392_1568 (size=232957) 2024-12-12T01:21:30,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742393_1569 (size=127628) 2024-12-12T01:21:30,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742393_1569 (size=127628) 2024-12-12T01:21:30,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742393_1569 (size=127628) 2024-12-12T01:21:30,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742394_1570 (size=20406) 2024-12-12T01:21:30,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742394_1570 (size=20406) 2024-12-12T01:21:30,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742394_1570 (size=20406) 2024-12-12T01:21:30,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742395_1571 (size=6426224) 2024-12-12T01:21:30,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742395_1571 (size=6426224) 2024-12-12T01:21:30,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742395_1571 (size=6426224) 2024-12-12T01:21:30,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742396_1572 (size=5175431) 2024-12-12T01:21:30,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742396_1572 (size=5175431) 2024-12-12T01:21:30,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742396_1572 (size=5175431) 2024-12-12T01:21:30,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742397_1573 (size=217634) 2024-12-12T01:21:30,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742397_1573 (size=217634) 2024-12-12T01:21:30,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742397_1573 (size=217634) 2024-12-12T01:21:30,600 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742398_1574 (size=1832290) 2024-12-12T01:21:30,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742398_1574 (size=1832290) 2024-12-12T01:21:30,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742398_1574 (size=1832290) 2024-12-12T01:21:30,643 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0010_000001 (auth:SIMPLE) from 127.0.0.1:50042 2024-12-12T01:21:30,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742399_1575 (size=322274) 2024-12-12T01:21:30,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742399_1575 (size=322274) 2024-12-12T01:21:30,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742399_1575 (size=322274) 2024-12-12T01:21:30,669 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_0/usercache/jenkins/appcache/application_1733966109028_0010/container_1733966109028_0010_01_000001/launch_container.sh] 2024-12-12T01:21:30,669 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_0/usercache/jenkins/appcache/application_1733966109028_0010/container_1733966109028_0010_01_000001/container_tokens] 2024-12-12T01:21:30,669 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_0/usercache/jenkins/appcache/application_1733966109028_0010/container_1733966109028_0010_01_000001/sysfs] 2024-12-12T01:21:30,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742400_1576 (size=503880) 2024-12-12T01:21:30,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742400_1576 (size=503880) 2024-12-12T01:21:30,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742400_1576 (size=503880) 2024-12-12T01:21:30,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:30,712 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-12T01:21:30,712 DEBUG 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-12T01:21:30,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742401_1577 (size=29229) 2024-12-12T01:21:30,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742401_1577 (size=29229) 2024-12-12T01:21:30,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742401_1577 (size=29229) 2024-12-12T01:21:30,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742402_1578 (size=24096) 2024-12-12T01:21:30,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742402_1578 (size=24096) 2024-12-12T01:21:30,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742402_1578 (size=24096) 2024-12-12T01:21:30,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742403_1579 (size=111872) 2024-12-12T01:21:30,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742403_1579 (size=111872) 2024-12-12T01:21:30,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742403_1579 (size=111872) 2024-12-12T01:21:30,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742404_1580 (size=45609) 2024-12-12T01:21:30,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742404_1580 (size=45609) 2024-12-12T01:21:30,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742404_1580 (size=45609) 2024-12-12T01:21:30,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742405_1581 (size=136454) 2024-12-12T01:21:30,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742405_1581 (size=136454) 2024-12-12T01:21:30,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742405_1581 (size=136454) 2024-12-12T01:21:30,849 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
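
The long run of "For class X, using jar Y" DEBUG entries is TableMapReduceUtil locating, for every class the export job depends on, the jar that contains it and attaching that jar to the job; because the job is assembled this way, JobResourceUploader warns that no explicit job jar is set. A minimal illustration, assuming the standard TableMapReduceUtil API (the job below is a stand-in, not the actual ExportSnapshot job):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "dependency-jars-sketch");
        // Resolves the containing jar for hbase-client, hbase-protocol-shaded,
        // zookeeper, opentelemetry, hadoop-common, etc. and adds each one to the
        // job's distributed cache, which is what the DEBUG entries above enumerate.
        TableMapReduceUtil.addDependencyJars(job);
        System.out.println(job.getConfiguration().get("tmpjars"));
      }
    }
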
2024-12-12T01:21:30,855 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-12T01:21:30,857 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.0 K 2024-12-12T01:21:30,857 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.3 K 2024-12-12T01:21:30,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742406_1582 (size=469) 2024-12-12T01:21:30,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742406_1582 (size=469) 2024-12-12T01:21:30,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742406_1582 (size=469) 2024-12-12T01:21:30,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742407_1583 (size=21) 2024-12-12T01:21:30,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742407_1583 (size=21) 2024-12-12T01:21:30,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742407_1583 (size=21) 2024-12-12T01:21:30,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742408_1584 (size=304172) 2024-12-12T01:21:30,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742408_1584 (size=304172) 2024-12-12T01:21:30,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742408_1584 (size=304172) 2024-12-12T01:21:30,948 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-12T01:21:30,948 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-12T01:21:31,474 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0011_000001 (auth:SIMPLE) from 127.0.0.1:47610 2024-12-12T01:21:32,070 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:21:37,745 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0011_000001 (auth:SIMPLE) from 127.0.0.1:41672 2024-12-12T01:21:38,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742409_1585 (size=349894) 2024-12-12T01:21:38,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742409_1585 (size=349894) 2024-12-12T01:21:38,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742409_1585 (size=349894) 2024-12-12T01:21:40,350 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0011_000001 (auth:SIMPLE) from 127.0.0.1:47428 2024-12-12T01:21:40,350 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0011_000001 (auth:SIMPLE) from 127.0.0.1:45772 2024-12-12T01:21:47,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742410_1586 (size=8188) 2024-12-12T01:21:47,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742410_1586 (size=8188) 2024-12-12T01:21:47,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742410_1586 (size=8188) 2024-12-12T01:21:47,418 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_3/usercache/jenkins/appcache/application_1733966109028_0011/container_1733966109028_0011_01_000002/launch_container.sh] 2024-12-12T01:21:47,418 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_3/usercache/jenkins/appcache/application_1733966109028_0011/container_1733966109028_0011_01_000002/container_tokens] 2024-12-12T01:21:47,418 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_3/usercache/jenkins/appcache/application_1733966109028_0011/container_1733966109028_0011_01_000002/sysfs] 2024-12-12T01:21:48,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742412_1588 (size=5424) 2024-12-12T01:21:48,799 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742412_1588 (size=5424) 2024-12-12T01:21:48,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742412_1588 (size=5424) 2024-12-12T01:21:48,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742411_1587 (size=22218) 2024-12-12T01:21:48,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742411_1587 (size=22218) 2024-12-12T01:21:48,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742411_1587 (size=22218) 2024-12-12T01:21:48,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742413_1589 (size=477) 2024-12-12T01:21:48,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742413_1589 (size=477) 2024-12-12T01:21:48,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742413_1589 (size=477) 2024-12-12T01:21:48,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742414_1590 (size=22218) 2024-12-12T01:21:48,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742414_1590 (size=22218) 2024-12-12T01:21:48,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742414_1590 (size=22218) 2024-12-12T01:21:48,915 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_0/usercache/jenkins/appcache/application_1733966109028_0011/container_1733966109028_0011_01_000003/launch_container.sh] 2024-12-12T01:21:48,915 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_0/usercache/jenkins/appcache/application_1733966109028_0011/container_1733966109028_0011_01_000003/container_tokens] 2024-12-12T01:21:48,915 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-1_0/usercache/jenkins/appcache/application_1733966109028_0011/container_1733966109028_0011_01_000003/sysfs] 2024-12-12T01:21:48,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742415_1591 (size=349894) 2024-12-12T01:21:48,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742415_1591 (size=349894) 2024-12-12T01:21:48,920 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742415_1591 (size=349894) 2024-12-12T01:21:48,933 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0011_000001 (auth:SIMPLE) from 127.0.0.1:40742 2024-12-12T01:21:49,246 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 42720 2024-12-12T01:21:50,269 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-12T01:21:50,269 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-12T01:21:50,275 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,275 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-12T01:21:50,275 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-12T01:21:50,275 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,276 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-12T01:21:50,276 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-12T01:21:50,276 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-975961810_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966488245/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966488245/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,276 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966488245/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-12T01:21:50,276 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/export-test/export-1733966488245/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-12T01:21:50,281 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, 
state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-12T01:21:50,284 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966510284"}]},"ts":"1733966510284"} 2024-12-12T01:21:50,285 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-12T01:21:50,285 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-12T01:21:50,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-12T01:21:50,287 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f05b6a016511c5fd533c5f218e73e8d2, UNASSIGN}, {pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3d5c7f0d35e469d819acec02f89cfa9b, UNASSIGN}] 2024-12-12T01:21:50,288 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3d5c7f0d35e469d819acec02f89cfa9b, UNASSIGN 2024-12-12T01:21:50,288 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f05b6a016511c5fd533c5f218e73e8d2, UNASSIGN 2024-12-12T01:21:50,289 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=245 updating hbase:meta row=3d5c7f0d35e469d819acec02f89cfa9b, regionState=CLOSING, regionLocation=36236b0d7605,38295,1733966101221 2024-12-12T01:21:50,289 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=244 updating hbase:meta row=f05b6a016511c5fd533c5f218e73e8d2, regionState=CLOSING, regionLocation=36236b0d7605,33883,1733966101479 2024-12-12T01:21:50,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3d5c7f0d35e469d819acec02f89cfa9b, UNASSIGN because future has completed 2024-12-12T01:21:50,290 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:21:50,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b, server=36236b0d7605,38295,1733966101221}] 2024-12-12T01:21:50,291 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f05b6a016511c5fd533c5f218e73e8d2, UNASSIGN because future has completed 2024-12-12T01:21:50,291 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T01:21:50,291 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=247, ppid=244, state=RUNNABLE, hasLock=false; CloseRegionProcedure f05b6a016511c5fd533c5f218e73e8d2, server=36236b0d7605,33883,1733966101479}] 2024-12-12T01:21:50,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-12T01:21:50,443 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(122): Close 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:50,443 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:21:50,443 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1722): Closing 3d5c7f0d35e469d819acec02f89cfa9b, disabling compactions & flushes 2024-12-12T01:21:50,443 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 2024-12-12T01:21:50,443 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 2024-12-12T01:21:50,443 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. after waiting 0 ms 2024-12-12T01:21:50,443 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 2024-12-12T01:21:50,444 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(122): Close f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:50,444 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-12T01:21:50,444 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1722): Closing f05b6a016511c5fd533c5f218e73e8d2, disabling compactions & flushes 2024-12-12T01:21:50,444 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 
2024-12-12T01:21:50,444 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 2024-12-12T01:21:50,444 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. after waiting 0 ms 2024-12-12T01:21:50,444 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 2024-12-12T01:21:50,447 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:21:50,448 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:21:50,448 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b. 2024-12-12T01:21:50,448 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1676): Region close journal for 3d5c7f0d35e469d819acec02f89cfa9b: Waiting for close lock at 1733966510443Running coprocessor pre-close hooks at 1733966510443Disabling compacts and flushes for region at 1733966510443Disabling writes for close at 1733966510443Writing region close event to WAL at 1733966510444 (+1 ms)Running coprocessor post-close hooks at 1733966510448 (+4 ms)Closed at 1733966510448 2024-12-12T01:21:50,450 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(157): Closed 3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:50,450 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=245 updating hbase:meta row=3d5c7f0d35e469d819acec02f89cfa9b, regionState=CLOSED 2024-12-12T01:21:50,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=246, ppid=245, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b, server=36236b0d7605,38295,1733966101221 because future has completed 2024-12-12T01:21:50,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-12-12T01:21:50,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; CloseRegionProcedure 3d5c7f0d35e469d819acec02f89cfa9b, server=36236b0d7605,38295,1733966101221 in 163 msec 2024-12-12T01:21:50,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, ppid=243, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=3d5c7f0d35e469d819acec02f89cfa9b, UNASSIGN in 167 msec 
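
With both regions closed and unassigned, the table is marked DISABLED in hbase:meta and then deleted (the DeleteTableProcedure, PermissionStorage, and HFileArchiver entries below). Client-side, this teardown is a pair of Admin calls; a sketch assuming the standard Admin API (the commented snapshot cleanup is typical teardown but does not appear in this excerpt):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    final class TeardownSketch {
      static void dropTable(Admin admin, TableName tn) throws IOException {
        if (!admin.isTableDisabled(tn)) {
          admin.disableTable(tn);  // DisableTableProcedure: unassign and close every region
        }
        admin.deleteTable(tn);     // DeleteTableProcedure: archive region dirs, clear hbase:meta and ACLs
        // Assumed follow-up, not shown in this excerpt:
        // admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
      }
    }
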
2024-12-12T01:21:50,456 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T01:21:50,457 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:21:50,457 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2. 2024-12-12T01:21:50,457 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1676): Region close journal for f05b6a016511c5fd533c5f218e73e8d2: Waiting for close lock at 1733966510444Running coprocessor pre-close hooks at 1733966510444Disabling compacts and flushes for region at 1733966510444Disabling writes for close at 1733966510444Writing region close event to WAL at 1733966510445 (+1 ms)Running coprocessor post-close hooks at 1733966510456 (+11 ms)Closed at 1733966510457 (+1 ms) 2024-12-12T01:21:50,458 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(157): Closed f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:50,458 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=244 updating hbase:meta row=f05b6a016511c5fd533c5f218e73e8d2, regionState=CLOSED 2024-12-12T01:21:50,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=247, ppid=244, state=RUNNABLE, hasLock=false; CloseRegionProcedure f05b6a016511c5fd533c5f218e73e8d2, server=36236b0d7605,33883,1733966101479 because future has completed 2024-12-12T01:21:50,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=247, resume processing ppid=244 2024-12-12T01:21:50,462 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=244, state=SUCCESS, hasLock=false; CloseRegionProcedure f05b6a016511c5fd533c5f218e73e8d2, server=36236b0d7605,33883,1733966101479 in 169 msec 2024-12-12T01:21:50,463 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=244, resume processing ppid=243 2024-12-12T01:21:50,463 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=243, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f05b6a016511c5fd533c5f218e73e8d2, UNASSIGN in 175 msec 2024-12-12T01:21:50,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=243, resume processing ppid=242 2024-12-12T01:21:50,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 178 msec 2024-12-12T01:21:50,467 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733966510467"}]},"ts":"1733966510467"} 2024-12-12T01:21:50,468 INFO [PEWorker-5 {}] 
hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-12T01:21:50,468 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-12T01:21:50,470 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 188 msec 2024-12-12T01:21:50,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-12T01:21:50,605 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-12T01:21:50,606 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] procedure2.ProcedureExecutor(1139): Stored pid=248, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,607 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=248, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,608 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=248, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,610 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44743 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,613 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:50,613 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:50,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-12T01:21:50,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-12T01:21:50,615 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-12T01:21:50,615 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/recovered.edits] 2024-12-12T01:21:50,615 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/cf, FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/recovered.edits] 2024-12-12T01:21:50,616 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-12T01:21:50,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:21:50,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:21:50,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:21:50,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-12T01:21:50,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-12T01:21:50,618 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:50,618 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:50,618 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:50,618 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-12T01:21:50,620 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/cf/51da632947184898a6b9b8a1a39b9988 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/cf/51da632947184898a6b9b8a1a39b9988 2024-12-12T01:21:50,620 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/cf/e84b3c3245a34a769fb34af5490df5d3 to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/cf/e84b3c3245a34a769fb34af5490df5d3 2024-12-12T01:21:50,623 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/recovered.edits/9.seqid to 
hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b/recovered.edits/9.seqid 2024-12-12T01:21:50,623 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/recovered.edits/9.seqid to hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2/recovered.edits/9.seqid 2024-12-12T01:21:50,623 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/3d5c7f0d35e469d819acec02f89cfa9b 2024-12-12T01:21:50,623 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testtb-testExportFileSystemStateWithSkipTmp/f05b6a016511c5fd533c5f218e73e8d2 2024-12-12T01:21:50,623 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-12T01:21:50,625 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=248, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,629 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-12T01:21:50,631 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-12T01:21:50,632 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=248, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,632 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
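[Editor's note — not part of the captured log.] The entries around this point record the master disabling the test table (DisableTableProcedure, pid=242), then deleting it (DeleteTableProcedure, pid=248), and shortly afterwards removing its two snapshots. For orientation only, a client-side cleanup that would produce this sequence is sketched below with the standard HBase Admin API. The table and snapshot names are taken from the log; the class name and everything else in the sketch are assumptions, not the actual test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TestTableCleanupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      if (admin.tableExists(table)) {
        if (!admin.isTableDisabled(table)) {
          // Corresponds to the DisableTableProcedure (pid=242) logged above.
          admin.disableTable(table);
        }
        // Corresponds to the DeleteTableProcedure (pid=248): regions are archived,
        // removed from hbase:meta, and the table descriptor is dropped.
        admin.deleteTable(table);
      }
      // Corresponds to the "Deleting snapshot:" entries that follow in the log.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
    }
  }
}

The ACL znode deletions and HFile archiving seen in the surrounding entries are side effects of the same delete procedure; the client only issues the Admin calls above.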
2024-12-12T01:21:50,632 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966510632"}]},"ts":"9223372036854775807"} 2024-12-12T01:21:50,632 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733966510632"}]},"ts":"9223372036854775807"} 2024-12-12T01:21:50,634 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-12T01:21:50,634 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => f05b6a016511c5fd533c5f218e73e8d2, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733966486875.f05b6a016511c5fd533c5f218e73e8d2.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3d5c7f0d35e469d819acec02f89cfa9b, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b.', STARTKEY => '1', ENDKEY => ''}] 2024-12-12T01:21:50,634 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-12-12T01:21:50,634 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733966510634"}]},"ts":"9223372036854775807"} 2024-12-12T01:21:50,635 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-12T01:21:50,636 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=248, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 30 msec 2024-12-12T01:21:50,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-12T01:21:50,725 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,725 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-12T01:21:50,731 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-12T01:21:50,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,734 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] master.MasterRpcServices(838): 
Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-12T01:21:50,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-12T01:21:50,757 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=819 (was 811) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:37020 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-8925 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: process reaper (pid 22472) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-975961810_22 at /127.0.0.1:33470 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40747 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (166867004) connection to localhost/127.0.0.1:40747 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-812396456_1 at /127.0.0.1:39674 [Waiting for 
operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=782 (was 782), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=846 (was 939), ProcessCount=21 (was 21), AvailableMemoryMB=4987 (was 5193) 2024-12-12T01:21:50,757 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500 2024-12-12T01:21:50,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-12-12T01:21:50,764 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3837ae7d{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-12T01:21:50,767 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@760d8954{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T01:21:50,767 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T01:21:50,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@207b4e3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-12T01:21:50,767 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@658498f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,STOPPED} 2024-12-12T01:21:55,004 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733966109028_0011_000001 (auth:SIMPLE) from 127.0.0.1:34908 2024-12-12T01:21:55,022 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_3/usercache/jenkins/appcache/application_1733966109028_0011/container_1733966109028_0011_01_000001/launch_container.sh] 2024-12-12T01:21:55,022 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_3/usercache/jenkins/appcache/application_1733966109028_0011/container_1733966109028_0011_01_000001/container_tokens] 2024-12-12T01:21:55,022 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_549102062/yarn-2211103595/MiniMRCluster_549102062-localDir-nm-0_3/usercache/jenkins/appcache/application_1733966109028_0011/container_1733966109028_0011_01_000001/sysfs] 2024-12-12T01:21:55,967 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:21:58,913 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:22:04,503 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d644bbcb843c6264137c6748d170fd67, had cached 0 bytes from a total of 8324 2024-12-12T01:22:04,503 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e4e1b654a5a995b6e71de7ab606443fe, had cached 0 bytes from a total of 5288 2024-12-12T01:22:07,780 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@440030ba{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-12T01:22:07,780 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35130132{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T01:22:07,780 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T01:22:07,780 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35f6d89f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-12T01:22:07,781 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a3d235c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,STOPPED} 2024-12-12T01:22:24,788 ERROR [Thread[Thread-396,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-12T01:22:24,788 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@537e6d2d{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-12T01:22:24,789 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@456f2b74{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T01:22:24,789 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T01:22:24,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5336f6f1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-12T01:22:24,789 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d1d5bb9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,STOPPED} 2024-12-12T01:22:24,793 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): 
org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-12T01:22:24,799 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-12T01:22:24,799 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-12T01:22:24,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741830_1006 (size=1151714) 2024-12-12T01:22:24,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741830_1006 (size=1151714) 2024-12-12T01:22:24,807 ERROR [Thread[Thread-419,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-12T01:22:24,810 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6b16933f{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-12T01:22:24,812 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3686a4b4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T01:22:24,812 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T01:22:24,812 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@796e2812{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-12T01:22:24,812 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7efa4ca8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,STOPPED} 2024-12-12T01:22:24,814 ERROR [Thread[Thread-372,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-12T01:22:24,814 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-12T01:22:24,814 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-12T01:22:24,814 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-12T01:22:24,814 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T01:22:24,815 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:22:24,815 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:22:24,815 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
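[Editor's note — not part of the captured log.] The call stack logged just above shows the JUnit teardown hook, TestExportSnapshot.tearDownAfterClass, driving HBaseTestingUtil.shutdownMiniCluster(), immediately after the "Mini mapreduce cluster stopped" entry. A minimal sketch of such a teardown hook is shown below; the TEST_UTIL field, its construction, and the exact shutdownMiniMapReduceCluster() method name are assumptions inferred from the log messages, not the actual test code.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class ExportSnapshotTeardownSketch {
  // Assumed to have been created and started in a @BeforeClass hook.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // "Stopping mini mapreduce cluster..." / "Mini mapreduce cluster stopped" in the log.
    TEST_UTIL.shutdownMiniMapReduceCluster();
    // "Shutting down minicluster": closes the async connection, then stops the
    // master and the three region servers, as the entries below record.
    TEST_UTIL.shutdownMiniCluster();
  }
}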
2024-12-12T01:22:24,815 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-12T01:22:24,815 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=144979328, stopped=false 2024-12-12T01:22:24,815 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:22:24,815 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-12T01:22:24,815 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=36236b0d7605,36083,1733966100114 2024-12-12T01:22:24,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T01:22:24,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T01:22:24,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T01:22:24,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:22:24,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:22:24,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:22:24,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T01:22:24,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:22:24,817 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-12T01:22:24,818 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-12T01:22:24,818 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T01:22:24,818 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:22:24,819 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '36236b0d7605,38295,1733966101221' ***** 2024-12-12T01:22:24,819 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:22:24,819 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-12T01:22:24,819 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '36236b0d7605,44743,1733966101408' ***** 2024-12-12T01:22:24,819 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:22:24,819 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-12T01:22:24,819 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '36236b0d7605,33883,1733966101479' ***** 
2024-12-12T01:22:24,819 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:22:24,819 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-12T01:22:24,819 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T01:22:24,819 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T01:22:24,819 INFO [RS:0;36236b0d7605:38295 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-12T01:22:24,819 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-12T01:22:24,820 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T01:22:24,820 INFO [RS:0;36236b0d7605:38295 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T01:22:24,820 INFO [RS:2;36236b0d7605:33883 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-12T01:22:24,820 INFO [RS:1;36236b0d7605:44743 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T01:22:24,820 INFO [RS:2;36236b0d7605:33883 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T01:22:24,820 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-12T01:22:24,820 INFO [RS:0;36236b0d7605:38295 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-12T01:22:24,820 INFO [RS:1;36236b0d7605:44743 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-12T01:22:24,820 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-12T01:22:24,820 INFO [RS:2;36236b0d7605:33883 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-12T01:22:24,820 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T01:22:24,820 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(3091): Received CLOSE for a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:22:24,820 INFO [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(3091): Received CLOSE for e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:22:24,820 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-12T01:22:24,820 INFO [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(3091): Received CLOSE for d644bbcb843c6264137c6748d170fd67 2024-12-12T01:22:24,820 INFO [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(959): stopping server 36236b0d7605,33883,1733966101479 2024-12-12T01:22:24,821 INFO [RS:2;36236b0d7605:33883 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-12T01:22:24,821 INFO [RS:2;36236b0d7605:33883 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;36236b0d7605:33883. 2024-12-12T01:22:24,821 DEBUG [RS:2;36236b0d7605:33883 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T01:22:24,821 DEBUG [RS:2;36236b0d7605:33883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:22:24,821 INFO [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-12T01:22:24,821 DEBUG [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(1325): Online Regions={d644bbcb843c6264137c6748d170fd67=testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67.} 2024-12-12T01:22:24,821 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e4e1b654a5a995b6e71de7ab606443fe, disabling compactions & flushes 2024-12-12T01:22:24,821 DEBUG [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(1351): Waiting on d644bbcb843c6264137c6748d170fd67 2024-12-12T01:22:24,821 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region 
testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:22:24,821 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:22:24,821 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(959): stopping server 36236b0d7605,44743,1733966101408 2024-12-12T01:22:24,821 INFO [RS:1;36236b0d7605:44743 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-12T01:22:24,821 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. after waiting 0 ms 2024-12-12T01:22:24,821 INFO [RS:1;36236b0d7605:44743 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;36236b0d7605:44743. 2024-12-12T01:22:24,821 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:22:24,821 DEBUG [RS:1;36236b0d7605:44743 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T01:22:24,821 DEBUG [RS:1;36236b0d7605:44743 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:22:24,822 INFO [RS:1;36236b0d7605:44743 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T01:22:24,822 INFO [RS:1;36236b0d7605:44743 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T01:22:24,822 INFO [RS:1;36236b0d7605:44743 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-12T01:22:24,822 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-12T01:22:24,822 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a76413eb65b7bad8091ed755bedda4fd, disabling compactions & flushes 2024-12-12T01:22:24,822 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 2024-12-12T01:22:24,822 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 2024-12-12T01:22:24,822 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. after waiting 0 ms 2024-12-12T01:22:24,822 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d644bbcb843c6264137c6748d170fd67, disabling compactions & flushes 2024-12-12T01:22:24,822 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 2024-12-12T01:22:24,822 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 2024-12-12T01:22:24,822 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 2024-12-12T01:22:24,822 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. after waiting 0 ms 2024-12-12T01:22:24,822 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 2024-12-12T01:22:24,822 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing a76413eb65b7bad8091ed755bedda4fd 1/1 column families, dataSize=1.65 KB heapSize=3.90 KB 2024-12-12T01:22:24,823 INFO [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(959): stopping server 36236b0d7605,38295,1733966101221 2024-12-12T01:22:24,823 INFO [RS:0;36236b0d7605:38295 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-12T01:22:24,823 INFO [RS:0;36236b0d7605:38295 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;36236b0d7605:38295. 
2024-12-12T01:22:24,823 DEBUG [RS:0;36236b0d7605:38295 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T01:22:24,823 DEBUG [RS:0;36236b0d7605:38295 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:22:24,823 INFO [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-12T01:22:24,823 DEBUG [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(1325): Online Regions={e4e1b654a5a995b6e71de7ab606443fe=testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe.} 2024-12-12T01:22:24,823 DEBUG [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(1351): Waiting on e4e1b654a5a995b6e71de7ab606443fe 2024-12-12T01:22:24,829 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-12T01:22:24,829 DEBUG [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(1325): Online Regions={a76413eb65b7bad8091ed755bedda4fd=hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., 1588230740=hbase:meta,,1.1588230740} 2024-12-12T01:22:24,829 DEBUG [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a76413eb65b7bad8091ed755bedda4fd 2024-12-12T01:22:24,829 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-12T01:22:24,830 INFO [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-12T01:22:24,830 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-12T01:22:24,830 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T01:22:24,830 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T01:22:24,830 INFO [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=8.76 KB heapSize=14.76 KB 2024-12-12T01:22:24,834 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/info/4aa98cdfdd3a46b79001e4b2b42df514 is 121, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b./info:/1733966510632/DeleteFamily/seqid=0 2024-12-12T01:22:24,836 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/e4e1b654a5a995b6e71de7ab606443fe/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T01:22:24,837 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:22:24,837 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:22:24,837 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e4e1b654a5a995b6e71de7ab606443fe: Waiting for close lock at 1733966544821Running coprocessor pre-close hooks at 1733966544821Disabling compacts and flushes for region at 1733966544821Disabling writes for close at 1733966544821Writing region close event to WAL at 1733966544830 (+9 ms)Running coprocessor post-close hooks at 1733966544837 (+7 ms)Closed at 1733966544837 2024-12-12T01:22:24,837 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733966389156.e4e1b654a5a995b6e71de7ab606443fe. 2024-12-12T01:22:24,842 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/default/testExportExpiredSnapshot/d644bbcb843c6264137c6748d170fd67/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-12T01:22:24,842 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:22:24,842 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 
2024-12-12T01:22:24,842 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d644bbcb843c6264137c6748d170fd67: Waiting for close lock at 1733966544821Running coprocessor pre-close hooks at 1733966544822 (+1 ms)Disabling compacts and flushes for region at 1733966544822Disabling writes for close at 1733966544822Writing region close event to WAL at 1733966544830 (+8 ms)Running coprocessor post-close hooks at 1733966544842 (+12 ms)Closed at 1733966544842 2024-12-12T01:22:24,843 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733966389156.d644bbcb843c6264137c6748d170fd67. 2024-12-12T01:22:24,845 INFO [regionserver/36236b0d7605:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T01:22:24,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742416_1592 (size=6387) 2024-12-12T01:22:24,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742416_1592 (size=6387) 2024-12-12T01:22:24,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742416_1592 (size=6387) 2024-12-12T01:22:24,846 INFO [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.91 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/info/4aa98cdfdd3a46b79001e4b2b42df514 2024-12-12T01:22:24,852 INFO [regionserver/36236b0d7605:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T01:22:24,855 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/ns/fbf473379f7a434fb81313ebb8e1f3b9 is 119, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b./ns:/1733966510625/DeleteFamily/seqid=0 2024-12-12T01:22:24,855 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/acl/a76413eb65b7bad8091ed755bedda4fd/.tmp/l/a29011f0f0104d35a11042865048f4d1 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733966386898/DeleteFamily/seqid=0 2024-12-12T01:22:24,858 INFO [regionserver/36236b0d7605:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T01:22:24,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742417_1593 (size=5927) 2024-12-12T01:22:24,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742417_1593 (size=5927) 2024-12-12T01:22:24,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742417_1593 (size=5927) 2024-12-12T01:22:24,862 INFO [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=430 B at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/ns/fbf473379f7a434fb81313ebb8e1f3b9 2024-12-12T01:22:24,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742418_1594 (size=5860) 2024-12-12T01:22:24,870 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/rep_barrier/0e4ddde56c834a879046eb264b8875e3 is 128, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b./rep_barrier:/1733966510625/DeleteFamily/seqid=0 2024-12-12T01:22:24,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742418_1594 (size=5860) 2024-12-12T01:22:24,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742418_1594 (size=5860) 2024-12-12T01:22:24,871 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=31 (bloomFilter=false), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/acl/a76413eb65b7bad8091ed755bedda4fd/.tmp/l/a29011f0f0104d35a11042865048f4d1 2024-12-12T01:22:24,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742419_1595 (size=5990) 2024-12-12T01:22:24,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742419_1595 (size=5990) 2024-12-12T01:22:24,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742419_1595 (size=5990) 2024-12-12T01:22:24,875 INFO [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=466 B at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/rep_barrier/0e4ddde56c834a879046eb264b8875e3 2024-12-12T01:22:24,876 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a29011f0f0104d35a11042865048f4d1 2024-12-12T01:22:24,876 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/acl/a76413eb65b7bad8091ed755bedda4fd/.tmp/l/a29011f0f0104d35a11042865048f4d1 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/acl/a76413eb65b7bad8091ed755bedda4fd/l/a29011f0f0104d35a11042865048f4d1 2024-12-12T01:22:24,880 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a29011f0f0104d35a11042865048f4d1 
2024-12-12T01:22:24,880 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/acl/a76413eb65b7bad8091ed755bedda4fd/l/a29011f0f0104d35a11042865048f4d1, entries=14, sequenceid=31, filesize=5.7 K 2024-12-12T01:22:24,881 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/table/4b41cf75565848b5b2a60f4344554d0b is 122, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733966486875.3d5c7f0d35e469d819acec02f89cfa9b./table:/1733966510625/DeleteFamily/seqid=0 2024-12-12T01:22:24,881 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for a76413eb65b7bad8091ed755bedda4fd in 59ms, sequenceid=31, compaction requested=false 2024-12-12T01:22:24,884 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/acl/a76413eb65b7bad8091ed755bedda4fd/recovered.edits/34.seqid, newMaxSeqId=34, maxSeqId=1 2024-12-12T01:22:24,885 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:22:24,885 INFO [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 2024-12-12T01:22:24,885 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a76413eb65b7bad8091ed755bedda4fd: Waiting for close lock at 1733966544821Running coprocessor pre-close hooks at 1733966544822 (+1 ms)Disabling compacts and flushes for region at 1733966544822Disabling writes for close at 1733966544822Obtaining lock to block concurrent updates at 1733966544822Preparing flush snapshotting stores in a76413eb65b7bad8091ed755bedda4fd at 1733966544822Finished memstore snapshotting hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd., syncing WAL and waiting on mvcc, flushsize=dataSize=1694, getHeapSize=3976, getOffHeapSize=0, getCellsCount=27 at 1733966544822Flushing stores of hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 
at 1733966544823 (+1 ms)Flushing a76413eb65b7bad8091ed755bedda4fd/l: creating writer at 1733966544823Flushing a76413eb65b7bad8091ed755bedda4fd/l: appending metadata at 1733966544855 (+32 ms)Flushing a76413eb65b7bad8091ed755bedda4fd/l: closing flushed file at 1733966544855Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b0f1e99: reopening flushed file at 1733966544876 (+21 ms)Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for a76413eb65b7bad8091ed755bedda4fd in 59ms, sequenceid=31, compaction requested=false at 1733966544881 (+5 ms)Writing region close event to WAL at 1733966544882 (+1 ms)Running coprocessor post-close hooks at 1733966544885 (+3 ms)Closed at 1733966544885 2024-12-12T01:22:24,885 DEBUG [RS_CLOSE_REGION-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733966104850.a76413eb65b7bad8091ed755bedda4fd. 2024-12-12T01:22:24,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742420_1596 (size=6012) 2024-12-12T01:22:24,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742420_1596 (size=6012) 2024-12-12T01:22:24,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742420_1596 (size=6012) 2024-12-12T01:22:24,887 INFO [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=996 B at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/table/4b41cf75565848b5b2a60f4344554d0b 2024-12-12T01:22:24,891 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/info/4aa98cdfdd3a46b79001e4b2b42df514 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/info/4aa98cdfdd3a46b79001e4b2b42df514 2024-12-12T01:22:24,894 INFO [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/info/4aa98cdfdd3a46b79001e4b2b42df514, entries=8, sequenceid=239, filesize=6.2 K 2024-12-12T01:22:24,895 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/ns/fbf473379f7a434fb81313ebb8e1f3b9 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/ns/fbf473379f7a434fb81313ebb8e1f3b9 2024-12-12T01:22:24,898 INFO [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/ns/fbf473379f7a434fb81313ebb8e1f3b9, entries=4, sequenceid=239, filesize=5.8 K 2024-12-12T01:22:24,899 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/rep_barrier/0e4ddde56c834a879046eb264b8875e3 as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/rep_barrier/0e4ddde56c834a879046eb264b8875e3 2024-12-12T01:22:24,902 INFO [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/rep_barrier/0e4ddde56c834a879046eb264b8875e3, entries=4, sequenceid=239, filesize=5.8 K 2024-12-12T01:22:24,903 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/.tmp/table/4b41cf75565848b5b2a60f4344554d0b as hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/table/4b41cf75565848b5b2a60f4344554d0b 2024-12-12T01:22:24,907 INFO [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/table/4b41cf75565848b5b2a60f4344554d0b, entries=6, sequenceid=239, filesize=5.9 K 2024-12-12T01:22:24,908 INFO [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~8.76 KB/8966, heapSize ~14.70 KB/15048, currentSize=0 B/0 for 1588230740 in 78ms, sequenceid=239, compaction requested=false 2024-12-12T01:22:24,912 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/data/hbase/meta/1588230740/recovered.edits/242.seqid, newMaxSeqId=242, maxSeqId=1 2024-12-12T01:22:24,912 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:22:24,912 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T01:22:24,912 INFO [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-12T01:22:24,912 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733966544829Running coprocessor pre-close hooks at 1733966544829Disabling compacts and flushes for region at 1733966544829Disabling writes for close at 1733966544830 (+1 ms)Obtaining lock to block concurrent updates at 1733966544830Preparing flush snapshotting stores in 1588230740 at 1733966544830Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=8966, getHeapSize=15048, getOffHeapSize=0, getCellsCount=68 at 1733966544830Flushing stores of hbase:meta,,1.1588230740 at 1733966544831 (+1 ms)Flushing 1588230740/info: creating writer at 1733966544831Flushing 1588230740/info: appending 
metadata at 1733966544833 (+2 ms)Flushing 1588230740/info: closing flushed file at 1733966544833Flushing 1588230740/ns: creating writer at 1733966544853 (+20 ms)Flushing 1588230740/ns: appending metadata at 1733966544855 (+2 ms)Flushing 1588230740/ns: closing flushed file at 1733966544855Flushing 1588230740/rep_barrier: creating writer at 1733966544866 (+11 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733966544870 (+4 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733966544870Flushing 1588230740/table: creating writer at 1733966544879 (+9 ms)Flushing 1588230740/table: appending metadata at 1733966544881 (+2 ms)Flushing 1588230740/table: closing flushed file at 1733966544881Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c353b1a: reopening flushed file at 1733966544890 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73f47828: reopening flushed file at 1733966544894 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7351994d: reopening flushed file at 1733966544898 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@494789ff: reopening flushed file at 1733966544903 (+5 ms)Finished flush of dataSize ~8.76 KB/8966, heapSize ~14.70 KB/15048, currentSize=0 B/0 for 1588230740 in 78ms, sequenceid=239, compaction requested=false at 1733966544908 (+5 ms)Writing region close event to WAL at 1733966544909 (+1 ms)Running coprocessor post-close hooks at 1733966544912 (+3 ms)Closed at 1733966544912 2024-12-12T01:22:24,912 DEBUG [RS_CLOSE_META-regionserver/36236b0d7605:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-12T01:22:25,022 INFO [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(976): stopping server 36236b0d7605,33883,1733966101479; all regions closed. 2024-12-12T01:22:25,023 INFO [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(976): stopping server 36236b0d7605,38295,1733966101221; all regions closed. 
2024-12-12T01:22:25,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741834_1010 (size=11966) 2024-12-12T01:22:25,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741835_1011 (size=18543) 2024-12-12T01:22:25,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741835_1011 (size=18543) 2024-12-12T01:22:25,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741834_1010 (size=11966) 2024-12-12T01:22:25,028 DEBUG [RS:0;36236b0d7605:38295 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/oldWALs 2024-12-12T01:22:25,028 DEBUG [RS:2;36236b0d7605:33883 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/oldWALs 2024-12-12T01:22:25,028 INFO [RS:2;36236b0d7605:33883 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 36236b0d7605%2C33883%2C1733966101479:(num 1733966103691) 2024-12-12T01:22:25,028 INFO [RS:0;36236b0d7605:38295 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 36236b0d7605%2C38295%2C1733966101221:(num 1733966103689) 2024-12-12T01:22:25,028 DEBUG [RS:2;36236b0d7605:33883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:22:25,028 DEBUG [RS:0;36236b0d7605:38295 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:22:25,028 INFO [RS:2;36236b0d7605:33883 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T01:22:25,028 INFO [RS:0;36236b0d7605:38295 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T01:22:25,028 INFO [RS:0;36236b0d7605:38295 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-12T01:22:25,028 INFO [RS:2;36236b0d7605:33883 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-12T01:22:25,029 INFO [RS:2;36236b0d7605:33883 {}] hbase.ChoreService(370): Chore service for: regionserver/36236b0d7605:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-12T01:22:25,029 INFO [RS:0;36236b0d7605:38295 {}] hbase.ChoreService(370): Chore service for: regionserver/36236b0d7605:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-12T01:22:25,029 INFO [RS:0;36236b0d7605:38295 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T01:22:25,029 INFO [RS:2;36236b0d7605:33883 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T01:22:25,029 INFO [RS:0;36236b0d7605:38295 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T01:22:25,029 INFO [RS:2;36236b0d7605:33883 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T01:22:25,029 INFO [RS:0;36236b0d7605:38295 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-12T01:22:25,029 INFO [RS:2;36236b0d7605:33883 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-12T01:22:25,029 INFO [RS:0;36236b0d7605:38295 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-12T01:22:25,029 INFO [RS:2;36236b0d7605:33883 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-12T01:22:25,029 INFO [regionserver/36236b0d7605:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-12T01:22:25,029 INFO [regionserver/36236b0d7605:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-12T01:22:25,029 INFO [RS:2;36236b0d7605:33883 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33883 2024-12-12T01:22:25,029 INFO [RS:0;36236b0d7605:38295 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38295 2024-12-12T01:22:25,030 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(976): stopping server 36236b0d7605,44743,1733966101408; all regions closed. 2024-12-12T01:22:25,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741836_1012 (size=100501) 2024-12-12T01:22:25,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741836_1012 (size=100501) 2024-12-12T01:22:25,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/36236b0d7605,33883,1733966101479 2024-12-12T01:22:25,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T01:22:25,034 INFO [RS:2;36236b0d7605:33883 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-12T01:22:25,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/36236b0d7605,38295,1733966101221 2024-12-12T01:22:25,035 INFO [RS:0;36236b0d7605:38295 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-12T01:22:25,035 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [36236b0d7605,33883,1733966101479] 2024-12-12T01:22:25,036 DEBUG [RS:1;36236b0d7605:44743 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/oldWALs 2024-12-12T01:22:25,036 INFO [RS:1;36236b0d7605:44743 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 36236b0d7605%2C44743%2C1733966101408.meta:.meta(num 1733966104180) 2024-12-12T01:22:25,038 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/36236b0d7605,33883,1733966101479 already deleted, retry=false 2024-12-12T01:22:25,038 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 36236b0d7605,33883,1733966101479 expired; onlineServers=2 2024-12-12T01:22:25,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741833_1009 (size=12590) 2024-12-12T01:22:25,038 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, 
processing expiration [36236b0d7605,38295,1733966101221] 2024-12-12T01:22:25,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073741833_1009 (size=12590) 2024-12-12T01:22:25,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073741833_1009 (size=12590) 2024-12-12T01:22:25,040 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/36236b0d7605,38295,1733966101221 already deleted, retry=false 2024-12-12T01:22:25,040 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 36236b0d7605,38295,1733966101221 expired; onlineServers=1 2024-12-12T01:22:25,041 DEBUG [RS:1;36236b0d7605:44743 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/oldWALs 2024-12-12T01:22:25,041 INFO [RS:1;36236b0d7605:44743 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 36236b0d7605%2C44743%2C1733966101408:(num 1733966103688) 2024-12-12T01:22:25,041 DEBUG [RS:1;36236b0d7605:44743 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T01:22:25,041 INFO [RS:1;36236b0d7605:44743 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T01:22:25,041 INFO [RS:1;36236b0d7605:44743 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-12T01:22:25,041 INFO [RS:1;36236b0d7605:44743 {}] hbase.ChoreService(370): Chore service for: regionserver/36236b0d7605:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-12T01:22:25,041 INFO [RS:1;36236b0d7605:44743 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-12T01:22:25,041 INFO [regionserver/36236b0d7605:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-12T01:22:25,042 INFO [RS:1;36236b0d7605:44743 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44743 2024-12-12T01:22:25,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/36236b0d7605,44743,1733966101408 2024-12-12T01:22:25,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T01:22:25,044 INFO [RS:1;36236b0d7605:44743 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-12T01:22:25,045 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [36236b0d7605,44743,1733966101408] 2024-12-12T01:22:25,046 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/36236b0d7605,44743,1733966101408 already deleted, retry=false 2024-12-12T01:22:25,046 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 36236b0d7605,44743,1733966101408 expired; onlineServers=0 2024-12-12T01:22:25,046 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '36236b0d7605,36083,1733966100114' ***** 2024-12-12T01:22:25,046 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-12T01:22:25,046 INFO [M:0;36236b0d7605:36083 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-12T01:22:25,046 INFO [M:0;36236b0d7605:36083 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-12T01:22:25,046 DEBUG [M:0;36236b0d7605:36083 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-12T01:22:25,047 DEBUG [M:0;36236b0d7605:36083 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-12T01:22:25,047 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-12T01:22:25,047 DEBUG [master/36236b0d7605:0:becomeActiveMaster-HFileCleaner.small.0-1733966103086 {}] cleaner.HFileCleaner(306): Exit Thread[master/36236b0d7605:0:becomeActiveMaster-HFileCleaner.small.0-1733966103086,5,FailOnTimeoutGroup] 2024-12-12T01:22:25,047 DEBUG [master/36236b0d7605:0:becomeActiveMaster-HFileCleaner.large.0-1733966103068 {}] cleaner.HFileCleaner(306): Exit Thread[master/36236b0d7605:0:becomeActiveMaster-HFileCleaner.large.0-1733966103068,5,FailOnTimeoutGroup] 2024-12-12T01:22:25,047 INFO [M:0;36236b0d7605:36083 {}] hbase.ChoreService(370): Chore service for: master/36236b0d7605:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-12T01:22:25,047 INFO [M:0;36236b0d7605:36083 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-12T01:22:25,047 DEBUG [M:0;36236b0d7605:36083 {}] master.HMaster(1795): Stopping service threads 2024-12-12T01:22:25,047 INFO [M:0;36236b0d7605:36083 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-12T01:22:25,047 INFO [M:0;36236b0d7605:36083 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-12T01:22:25,048 INFO [M:0;36236b0d7605:36083 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-12T01:22:25,048 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-12T01:22:25,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-12T01:22:25,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T01:22:25,049 DEBUG [M:0;36236b0d7605:36083 {}] zookeeper.ZKUtil(347): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-12T01:22:25,049 WARN [M:0;36236b0d7605:36083 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-12T01:22:25,050 INFO [M:0;36236b0d7605:36083 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/.lastflushedseqids 2024-12-12T01:22:25,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42945 is added to blk_1073742421_1597 (size=325) 2024-12-12T01:22:25,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073742421_1597 (size=325) 2024-12-12T01:22:25,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34547 is added to blk_1073742421_1597 (size=325) 2024-12-12T01:22:25,065 INFO [M:0;36236b0d7605:36083 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-12T01:22:25,065 INFO [M:0;36236b0d7605:36083 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-12T01:22:25,065 DEBUG 
[M:0;36236b0d7605:36083 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T01:22:25,078 INFO [M:0;36236b0d7605:36083 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T01:22:25,079 DEBUG [M:0;36236b0d7605:36083 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T01:22:25,079 DEBUG [M:0;36236b0d7605:36083 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-12T01:22:25,079 DEBUG [M:0;36236b0d7605:36083 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T01:22:25,079 INFO [M:0;36236b0d7605:36083 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=979.47 KB heapSize=1.15 MB 2024-12-12T01:22:25,079 ERROR [AsyncFSWAL-0-hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData-prefix:36236b0d7605,36083,1733966100114 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData-prefix:36236b0d7605,36083,1733966100114,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T01:22:25,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T01:22:25,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38295-0x10083caab1a0001, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T01:22:25,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T01:22:25,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33883-0x10083caab1a0003, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T01:22:25,141 INFO [RS:0;36236b0d7605:38295 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-12T01:22:25,141 INFO [RS:2;36236b0d7605:33883 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-12T01:22:25,141 INFO [RS:2;36236b0d7605:33883 {}] regionserver.HRegionServer(1031): Exiting; stopping=36236b0d7605,33883,1733966101479; zookeeper connection closed. 2024-12-12T01:22:25,141 INFO [RS:0;36236b0d7605:38295 {}] regionserver.HRegionServer(1031): Exiting; stopping=36236b0d7605,38295,1733966101221; zookeeper connection closed. 2024-12-12T01:22:25,142 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@8debb97 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@8debb97 2024-12-12T01:22:25,142 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@485cb79a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@485cb79a 2024-12-12T01:22:25,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T01:22:25,145 INFO [RS:1;36236b0d7605:44743 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-12T01:22:25,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44743-0x10083caab1a0002, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T01:22:25,145 INFO [RS:1;36236b0d7605:44743 {}] regionserver.HRegionServer(1031): Exiting; stopping=36236b0d7605,44743,1733966101408; zookeeper connection closed. 
2024-12-12T01:22:25,145 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5c9635af {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5c9635af 2024-12-12T01:22:25,146 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-12T01:22:28,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741834_1010 (size=11966) 2024-12-12T01:22:28,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741835_1011 (size=18543) 2024-12-12T01:22:28,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741836_1012 (size=100501) 2024-12-12T01:22:28,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39595 is added to blk_1073741830_1006 (size=1151714) 2024-12-12T01:22:28,914 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:22:30,358 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:22:30,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:22:30,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T01:22:30,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-12T01:22:30,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-12T01:22:30,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-12T01:22:30,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:22:30,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-12T01:22:30,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-12T01:22:36,213 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-12T01:22:58,914 DEBUG [FsDatasetAsyncDiskServiceFixer {}] 
hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;36236b0d7605:36083
239 active threads
Thread 1 (main):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167)
    app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128)
    app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39)
    app//org.junit.rules.RunRules.evaluate(RunRules.java:20)
    app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    app//org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
    app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
    app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
    app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
    app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
    app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler):
  State: RUNNABLE
  Blocked count: 6
  Waited count: 0
  Stack:
    java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
    java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
    java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer):
  State: WAITING
  Blocked count: 29
  Waited count: 19
  Waiting on java.lang.ref.ReferenceQueue$Lock@5c7c5d55
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
Thread 4 (Signal Dispatcher):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 12 (Common-Cleaner):
  State: TIMED_WAITING
  Blocked count: 18
  Waited count: 22
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
    java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)
Thread 13 (Notification Thread):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
Thread 14 (pool-1-thread-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 27
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 15 (pool-1-thread-2):
  State: WAITING
  Blocked count: 0
  Waited count: 26
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15762f6f
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275)
    java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 16 (surefire-forkedjvm-stream-flusher):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 5121
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 18 (surefire-forkedjvm-command-thread):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.CountDownLatch$Sync@b139fe2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230)
    java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178)
    app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169)
    app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116)
    app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77)
    app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60)
    app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 22 (Time-limited test):
  State: RUNNABLE
  Blocked count: 11369
  Waited count: 12179
  Stack:
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154)
    app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181)
    app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186)
    app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113)
    app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396)
    app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886)
    app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038)
    app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568)
    app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner):
  State: WAITING
  Blocked count: 14
  Waited count: 15
  Waiting on java.lang.ref.ReferenceQueue$Lock@2a94cd10
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 25 (SSL Certificates Store Monitor):
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@e3d927a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@bf61b4): State: TIMED_WAITING Blocked count: 0 Waited count: 1019 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1915521514-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1915521514-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1915521514-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1915521514-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1915521514-41-acceptor-0@64a549f8-ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:33705}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1915521514-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1915521514-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1915521514-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 
(Session-HouseKeeper-11478400-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 40 Waited count: 3243 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2012b3d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) 
app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33353): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@71ecfe19): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 170 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4afbb8fc): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 171 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) 
app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 50052 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1579 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@61dad15e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33353): State: TIMED_WAITING Blocked count: 69 Waited count: 2469 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33353): State: TIMED_WAITING Blocked count: 66 Waited count: 2462 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33353): State: TIMED_WAITING Blocked count: 98 Waited count: 2483 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33353): State: TIMED_WAITING Blocked count: 100 Waited count: 2473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33353): State: TIMED_WAITING Blocked count: 74 Waited count: 2481 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5d18f67d): State: TIMED_WAITING Blocked count: 0 Waited count: 255 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@17d3a048): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@65a3f04a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@316beb8c): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp988370956-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp988370956-87-acceptor-0@406e3e45-ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:40107}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp988370956-88): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp988370956-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-6fb23b47-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@11bf4d1): State: TIMED_WAITING Blocked count: 0 Waited count: 1016 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 40851): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 324 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f578741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1370 Waited count: 1550 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3652a1d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 508 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 508 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 508 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 110 (IPC Client (166867004) connection to localhost/127.0.0.1:33353 from jenkins): State: TIMED_WAITING Blocked count: 1487 Waited count: 1488 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 111 (IPC Parameter Sending Thread for localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 0 Waited count: 2134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp558478696-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp558478696-121-acceptor-0@5c07940c-ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:42853}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp558478696-122): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp558478696-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-2536a978-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65942ca9): State: TIMED_WAITING Blocked count: 0 Waited count: 1014 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 34129): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 297 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18ecd284 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1383 Waited count: 1557 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5b3b9a90): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 509 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 157 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 160 (qtp1414918684-160): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (qtp1414918684-162-acceptor-0@67d4dc1c-ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:45835}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 164 (qtp1414918684-164): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 (qtp1414918684-166): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (Session-HouseKeeper-4356fc3b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 0 Waited 
count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (java.util.concurrent.ThreadPoolExecutor$Worker@362a7361[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@22b395e0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4af8baf3): State: TIMED_WAITING Blocked count: 0 Waited count: 1013 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 201 (IPC Server idle connection scanner for port 35865): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 203 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (Command processor): State: WAITING Blocked count: 0 Waited count: 303 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aea441 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 207 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1360 Waited count: 1544 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cd66212): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 199 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 511 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 507 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-34-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@aba6f66[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:54997): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 254 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 395 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e781bcd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:54997):): State: WAITING Blocked count: 2 Waited count: 477 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63202a16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 528 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4273a629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 246 (LeaseRenewer:jenkins@localhost:33353): State: TIMED_WAITING Blocked count: 14 Waited count: 527 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@3dda3aa8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 356 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 20 Waited count: 8 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:54997)): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) 
app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d560b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 96 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 75 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1478565a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 7 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 
(NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@781b3107 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083): State: WAITING Blocked count: 103 Waited count: 473 Waiting on java.util.concurrent.Semaphore$NonfairSync@617665a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083): State: WAITING Blocked count: 140 Waited count: 539 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a8d41d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083): State: WAITING Blocked count: 83 Waited count: 10367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c11145e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36083): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3da39dd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3da39dd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6faa9487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f89d378 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b2e8d33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ac2c689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41777bfa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 44 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;36236b0d7605:36083): State: TIMED_WAITING Blocked count: 12 Waited count: 4436 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1100/0x00007f44a4f72c98.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) 
app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@43e497f9): State: TIMED_WAITING Blocked count: 0 Waited count: 168 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5022 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 51 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 62 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 161 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50140 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 37 Waited count: 12 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 34 Waited count: 15 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 451 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1490f112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 23 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@255ccb36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e31b793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@559e6eda Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 495 (LeaseRenewer:jenkins.hfs.1@localhost:33353): State: TIMED_WAITING Blocked count: 14 Waited count: 528 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 506 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 516 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49965 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 549 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 497 Waiting on java.util.concurrent.ForkJoinPool@5d37b95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 577 (region-location-1): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 578 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 991 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 933 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1076 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 
Waited count: 113 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50639122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1134 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1135 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1136 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1198 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1199 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1200 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1249 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1250 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1251 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1253 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1254 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1609 (Container metrics unregistration): State: WAITING Blocked count: 12 Waited count: 48 Waiting on java.util.TaskQueue@3ef03be9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1967 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1968 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2268 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 472 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3573 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 693 Waiting on java.util.concurrent.ForkJoinPool@5d37b95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6092 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6093 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8459 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 44 Waiting on java.util.concurrent.ForkJoinPool@5d37b95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10139 (java.util.concurrent.ThreadPoolExecutor$Worker@7f2aecc6[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e1f071 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10262 (AsyncFSWAL-1-hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData-prefix:36236b0d7605,36083,1733966100114): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@381854af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10265 (java.util.concurrent.ThreadPoolExecutor$Worker@56ee2421[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10266 (java.util.concurrent.ThreadPoolExecutor$Worker@48d934a0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10269 (java.util.concurrent.ThreadPoolExecutor$Worker@180e9a76[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10270 (java.util.concurrent.ThreadPoolExecutor$Worker@50f7f3ff[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10274 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-12T01:23:28,914 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:23:58,914 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
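The periodic "Process Thread Dump" blocks in this log are emitted while the test waits for master M:0;36236b0d7605:36083 to shut down; the Thread 22 (Time-limited test) stack further below shows the call path (Threads.threadDumpingIsAlive -> ReflectionUtils.printThreadInfo -> sun.management.ThreadImpl.getThreadInfo). What follows is a minimal, hypothetical Java sketch, not the HBase utility itself, showing how an equivalent per-thread listing (state, blocked/waited counts, lock being waited on, stack frames) can be produced with the standard java.lang.management API; the class name ThreadDumpSketch is invented for illustration.

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class ThreadDumpSketch {
    public static void main(String[] args) {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        // dumpAllThreads(false, false): skip locked-monitor/synchronizer detail so the
        // call is supported everywhere; getLockName() still reports the object a
        // WAITING/BLOCKED thread is parked on, matching the "Waiting on ..." lines above.
        for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
            System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
            System.out.println("  State: " + info.getThreadState());
            System.out.println("  Blocked count: " + info.getBlockedCount());
            System.out.println("  Waited count: " + info.getWaitedCount());
            if (info.getLockName() != null) {
                System.out.println("  Waiting on " + info.getLockName());
            }
            System.out.println("  Stack:");
            for (StackTraceElement frame : info.getStackTrace()) {
                System.out.println("    " + frame);
            }
        }
    }
}

Running such a sketch inside the test JVM would yield output in the same shape as the dump that resumes below, though the exact thread names, counts, and frame depth depend on the JVM and on when the snapshot is taken.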
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;36236b0d7605:36083 233 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 29 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@5c7c5d55 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 30 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15762f6f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5721 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 58 Waiting on java.util.concurrent.CountDownLatch$Sync@1ea4f471 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11369 Waited count: 12180 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 14 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@2a94cd10 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@e3d927a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@bf61b4): State: TIMED_WAITING Blocked count: 0 Waited count: 1139 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1915521514-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1915521514-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1915521514-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1915521514-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1915521514-41-acceptor-0@64a549f8-ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:33705}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1915521514-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1915521514-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1915521514-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-11478400-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 40 Waited count: 3243 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2012b3d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33353): State: TIMED_WAITING Blocked count: 1 Waited 
count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@71ecfe19): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 190 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4afbb8fc): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 191 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 56016 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1579 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@61dad15e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33353): State: TIMED_WAITING Blocked count: 69 Waited count: 2530 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33353): State: TIMED_WAITING Blocked count: 66 Waited count: 2524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33353): State: TIMED_WAITING Blocked count: 98 Waited count: 2544 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33353): State: TIMED_WAITING Blocked count: 100 Waited count: 2534 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33353): State: TIMED_WAITING Blocked count: 74 Waited count: 2542 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5d18f67d): State: TIMED_WAITING Blocked count: 0 Waited count: 285 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@17d3a048): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@65a3f04a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@316beb8c): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp988370956-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp988370956-87-acceptor-0@406e3e45-ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:40107}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp988370956-88): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp988370956-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-6fb23b47-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@11bf4d1): State: TIMED_WAITING Blocked count: 0 Waited count: 1136 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 40851): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 344 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f578741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1390 Waited count: 1590 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3652a1d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 568 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 568 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 568 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 110 (IPC Client (166867004) connection to localhost/127.0.0.1:33353 from jenkins): State: TIMED_WAITING Blocked count: 1547 Waited count: 1548 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 111 (IPC Parameter Sending Thread for localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 0 Waited count: 2194 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp558478696-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp558478696-121-acceptor-0@5c07940c-ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:42853}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp558478696-122): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp558478696-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-2536a978-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65942ca9): State: TIMED_WAITING Blocked count: 0 Waited count: 1134 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 34129): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 317 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18ecd284 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1403 Waited count: 1597 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5b3b9a90): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 157 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 160 (qtp1414918684-160): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (qtp1414918684-162-acceptor-0@67d4dc1c-ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:45835}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 164 (qtp1414918684-164): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 (qtp1414918684-166): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (Session-HouseKeeper-4356fc3b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (java.util.concurrent.ThreadPoolExecutor$Worker@362a7361[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 3 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@22b395e0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4af8baf3): State: TIMED_WAITING Blocked count: 0 Waited count: 1133 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 201 (IPC Server idle connection scanner for port 35865): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 203 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (Command processor): State: WAITING Blocked count: 0 Waited count: 323 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aea441 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 207 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1380 Waited count: 1584 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cd66212): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 199 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 571 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 567 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 572 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-34-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@aba6f66[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:54997): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 284 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 400 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e781bcd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 
cport:54997):): State: WAITING Blocked count: 2 Waited count: 482 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63202a16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 533 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4273a629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@3dda3aa8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 20 Waited count: 8 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:54997)): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d560b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 75 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1478565a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 7 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@781b3107 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083): State: WAITING Blocked count: 103 Waited count: 473 Waiting on java.util.concurrent.Semaphore$NonfairSync@617665a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083): State: WAITING Blocked count: 140 Waited count: 539 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a8d41d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083): State: WAITING Blocked count: 83 Waited count: 10367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c11145e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36083): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3da39dd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3da39dd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6faa9487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f89d378 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b2e8d33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ac2c689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41777bfa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 44 
Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;36236b0d7605:36083): State: TIMED_WAITING Blocked count: 12 Waited count: 4436 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1100/0x00007f44a4f72c98.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@43e497f9): State: TIMED_WAITING Blocked count: 0 Waited count: 188 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5622 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 51 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 62 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 173 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79444e40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56142 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 37 Waited count: 12 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 34 Waited count: 15 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 451 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1490f112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 23 Waited count: 43 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@255ccb36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e31b793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@559e6eda Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 506 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 516 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55967 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 549 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 577 (region-location-1): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 578 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 991 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 939 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1076 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 113 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50639122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1134 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1135 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1136 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1198 
(RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1199 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1200 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1249 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1250 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1251 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1253 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1254 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1609 (Container metrics unregistration): State: WAITING Blocked count: 12 Waited count: 48 Waiting on java.util.TaskQueue@3ef03be9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1967 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1968 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3573 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 693 Waiting on java.util.concurrent.ForkJoinPool@5d37b95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6092 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6093 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8459 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 44 Waiting on java.util.concurrent.ForkJoinPool@5d37b95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10139 (java.util.concurrent.ThreadPoolExecutor$Worker@7f2aecc6[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e1f071 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10262 (AsyncFSWAL-1-hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData-prefix:36236b0d7605,36083,1733966100114): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@381854af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10274 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 10275 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-12T01:24:28,914 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:24:58,915 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-12T01:25:01,609 DEBUG [master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=28, reuseRatio=73.68% 2024-12-12T01:25:01,610 DEBUG [master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-12T01:25:09,761 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;36236b0d7605:36083 232 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 29 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@5c7c5d55 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 32 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15762f6f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6321 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 64 Waiting on java.util.concurrent.CountDownLatch$Sync@2319dee9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11369 Waited count: 12181 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 
(org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 14 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@2a94cd10 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@e3d927a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@bf61b4): State: TIMED_WAITING Blocked count: 0 Waited count: 1259 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1915521514-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1915521514-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1915521514-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1915521514-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1915521514-41-acceptor-0@64a549f8-ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:33705}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1915521514-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1915521514-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1915521514-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-11478400-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 40 Waited count: 3243 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2012b3d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33353): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@71ecfe19): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4afbb8fc): State: TIMED_WAITING Blocked count: 0 
Waited count: 126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 211 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 61979 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1579 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@61dad15e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33353): State: TIMED_WAITING Blocked count: 69 Waited count: 2591 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33353): State: TIMED_WAITING Blocked count: 66 Waited count: 2585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33353): State: TIMED_WAITING Blocked count: 98 Waited count: 2605 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33353): State: TIMED_WAITING Blocked count: 100 Waited count: 2595 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33353): State: TIMED_WAITING Blocked count: 74 Waited count: 2604 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5d18f67d): State: TIMED_WAITING Blocked count: 0 Waited count: 315 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@17d3a048): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@65a3f04a): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@316beb8c): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp988370956-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp988370956-87-acceptor-0@406e3e45-ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:40107}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp988370956-88): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp988370956-89): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-6fb23b47-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@11bf4d1): State: TIMED_WAITING Blocked count: 0 Waited count: 1256 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 40851): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 364 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f578741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1410 Waited count: 1630 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3652a1d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 628 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 628 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 628 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 110 (IPC Client (166867004) connection to localhost/127.0.0.1:33353 from jenkins): State: TIMED_WAITING Blocked count: 1607 Waited count: 1608 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 111 (IPC Parameter Sending Thread for localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 0 Waited count: 2254 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp558478696-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp558478696-121-acceptor-0@5c07940c-ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:42853}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp558478696-122): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp558478696-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-2536a978-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65942ca9): State: TIMED_WAITING Blocked count: 0 Waited count: 1254 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 34129): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 337 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18ecd284 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1423 Waited count: 1637 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5b3b9a90): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 630 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 632 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 630 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 157 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 160 (qtp1414918684-160): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (qtp1414918684-162-acceptor-0@67d4dc1c-ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:45835}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 164 (qtp1414918684-164): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 (qtp1414918684-166): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (Session-HouseKeeper-4356fc3b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22df2b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (java.util.concurrent.ThreadPoolExecutor$Worker@362a7361[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27ddbed9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
196 (java.util.concurrent.ThreadPoolExecutor$Worker@22b395e0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4af8baf3): State: TIMED_WAITING Blocked count: 0 Waited count: 1253 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 201 (IPC Server idle connection scanner for port 35865): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 203 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (Command processor): State: WAITING Blocked count: 0 Waited count: 343 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aea441 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 207 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1400 Waited count: 1624 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cd66212): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 199 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 632 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 631 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 630 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 627 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 632 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-34-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ca3aa39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@aba6f66[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:54997): State: RUNNABLE Blocked 
count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 314 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 404 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e781bcd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:54997):): State: WAITING Blocked count: 2 Waited count: 486 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63202a16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 537 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4273a629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@3dda3aa8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 415 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 20 Waited count: 8 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:54997)): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d560b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 75 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1478565a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 7 Waited count: 97 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 
(NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@781b3107 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083): State: WAITING Blocked count: 103 Waited count: 473 Waiting on java.util.concurrent.Semaphore$NonfairSync@617665a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083): State: WAITING Blocked count: 140 Waited count: 539 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a8d41d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083): State: WAITING Blocked count: 83 Waited count: 10367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c11145e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36083): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3da39dd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3da39dd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6faa9487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f89d378 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b2e8d33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ac2c689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41777bfa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 44 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;36236b0d7605:36083): State: TIMED_WAITING Blocked count: 12 Waited count: 4436 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1100/0x00007f44a4f72c98.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@43e497f9): State: TIMED_WAITING Blocked count: 0 Waited count: 208 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6221 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 51 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 62 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 173 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79444e40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 62143 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 37 Waited count: 12 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 34 Waited count: 15 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 451 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1490f112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 23 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@255ccb36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e31b793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@559e6eda Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 506 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 516 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 61968 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 577 (region-location-1): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 578 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 991 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 945 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1076 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 113 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50639122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1134 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1135 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1136 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1198 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1199 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1200 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1249 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1250 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1251 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1253 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1254 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1609 (Container metrics unregistration): State: WAITING Blocked count: 12 Waited count: 48 Waiting on 
java.util.TaskQueue@3ef03be9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1967 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1968 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3573 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 694 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6092 
(RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6093 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8459 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 44 Waiting on java.util.concurrent.ForkJoinPool@5d37b95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10139 (java.util.concurrent.ThreadPoolExecutor$Worker@7f2aecc6[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e1f071 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10262 (AsyncFSWAL-1-hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData-prefix:36236b0d7605,36083,1733966100114): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@381854af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10275 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10282 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-12T01:25:28,915 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:25:58,915 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;36236b0d7605:36083 230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 29 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@5c7c5d55 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 36 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15762f6f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6920 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 70 Waiting on java.util.concurrent.CountDownLatch$Sync@65701be9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11369 Waited count: 12182 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 14 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@2a94cd10 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@e3d927a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@bf61b4): State: TIMED_WAITING Blocked count: 0 Waited count: 1379 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1915521514-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1915521514-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1915521514-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1915521514-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1915521514-41-acceptor-0@64a549f8-ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:33705}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1915521514-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1915521514-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1915521514-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-11478400-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 40 Waited count: 3243 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2012b3d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33353): State: TIMED_WAITING Blocked count: 1 Waited 
count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@71ecfe19): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 230 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4afbb8fc): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 231 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 67943 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1579 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@61dad15e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33353): State: TIMED_WAITING Blocked count: 69 Waited count: 2653 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33353): State: TIMED_WAITING Blocked count: 66 Waited count: 2646 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33353): State: TIMED_WAITING Blocked count: 98 Waited count: 2666 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33353): State: TIMED_WAITING Blocked count: 100 Waited count: 2656 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33353): State: TIMED_WAITING Blocked count: 74 Waited count: 2664 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5d18f67d): State: TIMED_WAITING Blocked count: 0 Waited count: 345 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@17d3a048): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@65a3f04a): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@316beb8c): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp988370956-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp988370956-87-acceptor-0@406e3e45-ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:40107}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp988370956-88): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp988370956-89): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-6fb23b47-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@11bf4d1): State: TIMED_WAITING Blocked count: 0 Waited count: 1376 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 40851): State: TIMED_WAITING Blocked count: 1 Waited count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 384 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f578741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1430 Waited count: 1670 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3652a1d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 688 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 688 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 688 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 110 (IPC Client (166867004) connection to localhost/127.0.0.1:33353 from jenkins): State: TIMED_WAITING Blocked count: 1667 Waited count: 1668 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 111 (IPC Parameter Sending Thread for localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 0 Waited count: 2314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp558478696-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp558478696-121-acceptor-0@5c07940c-ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:42853}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp558478696-122): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp558478696-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-2536a978-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65942ca9): State: TIMED_WAITING Blocked count: 0 Waited count: 1374 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 34129): State: TIMED_WAITING Blocked count: 1 Waited count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 357 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18ecd284 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1443 Waited count: 1677 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5b3b9a90): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 690 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 692 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 690 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 157 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 160 (qtp1414918684-160): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (qtp1414918684-162-acceptor-0@67d4dc1c-ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:45835}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 164 (qtp1414918684-164): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 (qtp1414918684-166): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (Session-HouseKeeper-4356fc3b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22df2b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 
3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (java.util.concurrent.ThreadPoolExecutor$Worker@362a7361[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27ddbed9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@22b395e0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4af8baf3): State: TIMED_WAITING Blocked count: 0 Waited count: 1373 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 201 (IPC Server idle connection scanner for port 35865): State: TIMED_WAITING Blocked count: 1 Waited count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 203 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (Command processor): State: WAITING Blocked count: 0 Waited count: 363 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aea441 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 207 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1420 Waited count: 1664 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cd66212): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 199 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 692 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 691 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 690 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 687 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 692 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-34-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ca3aa39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@aba6f66[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:54997): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 408 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e781bcd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:54997):): State: WAITING Blocked count: 2 Waited count: 490 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63202a16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 541 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4273a629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@3dda3aa8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 443 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 20 Waited count: 8 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:54997)): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d560b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 75 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1478565a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 7 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@781b3107 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083): State: WAITING Blocked count: 103 Waited count: 473 Waiting on java.util.concurrent.Semaphore$NonfairSync@617665a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083): State: WAITING Blocked count: 140 Waited count: 539 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a8d41d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083): State: WAITING Blocked count: 83 Waited count: 10367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c11145e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36083): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3da39dd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3da39dd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6faa9487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f89d378 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36083): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b2e8d33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ac2c689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41777bfa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 44 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;36236b0d7605:36083): State: TIMED_WAITING Blocked count: 12 Waited count: 4436 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1100/0x00007f44a4f72c98.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@43e497f9): State: TIMED_WAITING Blocked count: 0 Waited count: 228 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 
Waited count: 6821 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 51 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 62 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 173 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79444e40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68145 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 37 Waited count: 12 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 34 Waited count: 15 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 451 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 
12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1490f112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 23 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@255ccb36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e31b793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@559e6eda Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 506 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 516 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67970 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 577 (region-location-1): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 578 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 991 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1076 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 113 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50639122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1134 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1135 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1136 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1198 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1199 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1200 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1249 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1250 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1251 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1253 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1254 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1609 (Container metrics unregistration): State: WAITING Blocked count: 12 Waited count: 48 Waiting on java.util.TaskQueue@3ef03be9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1967 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1968 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6092 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6093 
(RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8459 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10139 (java.util.concurrent.ThreadPoolExecutor$Worker@7f2aecc6[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e1f071 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10262 (AsyncFSWAL-1-hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData-prefix:36236b0d7605,36083,1733966100114): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@381854af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10282 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-12T01:26:28,915 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:26:58,915 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:27:25,080 DEBUG [M:0;36236b0d7605:36083 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733966545065Disabling compacts and flushes for region at 1733966545065Disabling writes for close at 1733966545079 (+14 ms)Obtaining lock to block concurrent updates at 1733966545079Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733966545079Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1002976, getHeapSize=1202984, getOffHeapSize=0, getCellsCount=2638 at 1733966545079Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1733966845080 (+300001 ms) 2024-12-12T01:27:25,080 WARN [M:0;36236b0d7605:36083 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4533, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4533, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-12-12T01:27:25,082 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T01:27:25,083 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-12T01:27:25,083 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-12T01:27:25,083 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/WALs/36236b0d7605,36083,1733966100114/36236b0d7605%2C36083%2C1733966100114.1733966102193 2024-12-12T01:27:25,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/WALs/36236b0d7605,36083,1733966100114/36236b0d7605%2C36083%2C1733966100114.1733966102193 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T01:27:25,086 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T01:27:25,086 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/WALs/36236b0d7605,36083,1733966100114/36236b0d7605%2C36083%2C1733966100114.1733966102193 2024-12-12T01:27:25,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/WALs/36236b0d7605,36083,1733966100114/36236b0d7605%2C36083%2C1733966100114.1733966102193 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;36236b0d7605:36083 231 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 29 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@5c7c5d55 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15762f6f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7520 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 76 Waiting on java.util.concurrent.CountDownLatch$Sync@4011e34 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) 
app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11369 Waited count: 12183 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 14 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@2a94cd10 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@e3d927a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@bf61b4): State: TIMED_WAITING Blocked count: 0 Waited count: 1499 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1915521514-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 
(qtp1915521514-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1915521514-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1915521514-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1915521514-41-acceptor-0@64a549f8-ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:33705}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1915521514-42): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1915521514-43): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1915521514-44): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-11478400-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 40 Waited count: 3243 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2012b3d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33353): State: TIMED_WAITING Blocked count: 1 Waited count: 76 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@71ecfe19): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 250 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4afbb8fc): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 251 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 73907 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1579 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@61dad15e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33353): State: TIMED_WAITING Blocked count: 69 Waited count: 2714 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33353): State: TIMED_WAITING Blocked count: 66 Waited count: 2707 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33353): State: TIMED_WAITING Blocked count: 98 Waited count: 2727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33353): State: TIMED_WAITING Blocked count: 100 Waited count: 2718 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33353): State: TIMED_WAITING Blocked count: 74 Waited count: 2726 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5d18f67d): State: TIMED_WAITING Blocked count: 0 Waited count: 375 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@17d3a048): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@65a3f04a): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@316beb8c): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp988370956-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp988370956-87-acceptor-0@406e3e45-ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:40107}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp988370956-88): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp988370956-89): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-6fb23b47-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@11bf4d1): State: TIMED_WAITING Blocked count: 0 Waited count: 1496 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 40851): State: TIMED_WAITING Blocked count: 1 Waited count: 76 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 404 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f578741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1450 Waited count: 1710 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3652a1d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 748 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 749 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 749 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 748 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 40851): State: TIMED_WAITING Blocked count: 0 Waited count: 748 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 110 (IPC Client (166867004) connection to localhost/127.0.0.1:33353 from jenkins): State: TIMED_WAITING Blocked count: 1727 Waited count: 1728 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 111 (IPC Parameter Sending Thread for localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 0 Waited count: 2374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp558478696-120): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp558478696-121-acceptor-0@5c07940c-ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:42853}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp558478696-122): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp558478696-123): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (Session-HouseKeeper-2536a978-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65942ca9): State: TIMED_WAITING Blocked count: 0 Waited count: 1494 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 34129): State: TIMED_WAITING Blocked count: 1 Waited count: 76 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 377 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18ecd284 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1463 Waited count: 1717 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5b3b9a90): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 750 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 752 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 749 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 749 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 34129): State: TIMED_WAITING Blocked count: 0 Waited count: 750 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 157 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 160 (qtp1414918684-160): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f44a442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (qtp1414918684-162-acceptor-0@67d4dc1c-ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:45835}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 164 (qtp1414918684-164): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 (qtp1414918684-166): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 3 Waited count: 3 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (Session-HouseKeeper-4356fc3b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22df2b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 185 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (java.util.concurrent.ThreadPoolExecutor$Worker@362a7361[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27ddbed9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (java.util.concurrent.ThreadPoolExecutor$Worker@22b395e0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4af8baf3): State: TIMED_WAITING Blocked count: 0 Waited count: 1493 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 201 (IPC Server idle connection scanner for port 35865): State: TIMED_WAITING Blocked count: 1 Waited count: 76 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 203 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 206 (Command processor): State: WAITING Blocked count: 0 Waited count: 383 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@aea441 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 207 (BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353): State: TIMED_WAITING Blocked count: 1440 Waited count: 1704 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7cd66212): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 199 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 752 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 751 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 750 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 747 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 35865): State: TIMED_WAITING Blocked count: 0 Waited count: 752 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6/current/BP-1163377979-172.17.0.2-1733966095287): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-34-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ca3aa39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@aba6f66[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:54997): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 76 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 374 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 413 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e781bcd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:54997):): State: WAITING Blocked count: 2 Waited count: 495 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63202a16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 546 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4273a629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@3dda3aa8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 471 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 20 Waited count: 8 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:54997)): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d560b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 75 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1478565a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 7 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c2ef6e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 
0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@781b3107 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36083): State: WAITING Blocked count: 103 Waited count: 473 Waiting on java.util.concurrent.Semaphore$NonfairSync@617665a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36083): State: WAITING Blocked count: 140 Waited count: 539 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a8d41d3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36083): State: WAITING Blocked count: 83 Waited count: 10367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c11145e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36083): State: WAITING Blocked count: 
0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3da39dd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3da39dd2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6faa9487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f89d378 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5b2e8d33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36083): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7ac2c689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41777bfa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 44 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;36236b0d7605:36083): State: TIMED_WAITING Blocked count: 12 Waited count: 4437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1411/0x00007f44a5202b58.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) 
app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/36236b0d7605:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@43e497f9): State: TIMED_WAITING Blocked count: 0 Waited count: 248 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 7421 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 51 Waited count: 2 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 62 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 409 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 173 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79444e40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 420 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 74146 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 430 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 37 Waited count: 12 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 34 Waited count: 15 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 451 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1490f112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 23 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@255ccb36 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e31b793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/36236b0d7605:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@559e6eda Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 506 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 516 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 73971 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 577 (region-location-1): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 578 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 991 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 
957 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1046 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1076 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 113 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@50639122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1134 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1135 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1136 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1198 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1199 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1200 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1249 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1250 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1251 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1253 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1254 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1609 (Container metrics unregistration): State: WAITING Blocked count: 12 Waited count: 48 Waiting on java.util.TaskQueue@3ef03be9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1967 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1968 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46150d1a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6092 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6093 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10139 (java.util.concurrent.ThreadPoolExecutor$Worker@7f2aecc6[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e1f071 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10262 (AsyncFSWAL-1-hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData-prefix:36236b0d7605,36083,1733966100114): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@381854af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10282 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 10283 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10284 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1402/0x00007f44a51fb250.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-12T01:27:28,916 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T01:27:29,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/WALs/36236b0d7605,36083,1733966100114/36236b0d7605%2C36083%2C1733966100114.1733966102193 after 4000ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T01:27:30,082 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-12-12T01:27:30,082 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-12T01:27:30,082 INFO [M:0;36236b0d7605:36083 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-12T01:27:30,082 INFO [M:0;36236b0d7605:36083 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36083 2024-12-12T01:27:30,083 INFO [M:0;36236b0d7605:36083 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-12T01:27:30,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33353/user/jenkins/test-data/2bdbe4e3-0ba9-edb3-0b82-f8b2f0c29221/MasterData/WALs/36236b0d7605,36083,1733966100114/36236b0d7605%2C36083%2C1733966100114.1733966102193 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-12-12T01:27:30,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T01:27:30,185 INFO [M:0;36236b0d7605:36083 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-12T01:27:30,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36083-0x10083caab1a0000, quorum=127.0.0.1:54997, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-12T01:27:30,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b930908{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T01:27:30,188 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c465c66{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T01:27:30,188 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T01:27:30,189 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ef7cd2a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T01:27:30,189 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@80b4649{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,STOPPED} 2024-12-12T01:27:30,190 WARN [BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-12T01:27:30,190 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-12T01:27:30,190 WARN [BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1163377979-172.17.0.2-1733966095287 (Datanode Uuid 96a69992-28f1-447f-a6d6-ea4f454937b0) service to localhost/127.0.0.1:33353 2024-12-12T01:27:30,190 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-12T01:27:30,192 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data5/current/BP-1163377979-172.17.0.2-1733966095287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T01:27:30,192 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data6/current/BP-1163377979-172.17.0.2-1733966095287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T01:27:30,192 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-12T01:27:30,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@486061d4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T01:27:30,195 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41d36900{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T01:27:30,195 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T01:27:30,195 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71ade74d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T01:27:30,195 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3188e696{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,STOPPED} 2024-12-12T01:27:30,196 WARN [BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-12T01:27:30,196 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-12T01:27:30,196 WARN [BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1163377979-172.17.0.2-1733966095287 (Datanode Uuid 0f665c89-e3d9-45cc-bcb2-2a3b8d2b6300) service to localhost/127.0.0.1:33353 2024-12-12T01:27:30,196 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-12T01:27:30,197 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data3/current/BP-1163377979-172.17.0.2-1733966095287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T01:27:30,197 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data4/current/BP-1163377979-172.17.0.2-1733966095287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T01:27:30,197 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-12T01:27:30,199 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@323a2aa2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-12T01:27:30,199 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@50927177{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T01:27:30,199 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T01:27:30,199 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ae01b05{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T01:27:30,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4107ad3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,STOPPED} 2024-12-12T01:27:30,201 WARN [BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-12T01:27:30,201 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-12T01:27:30,201 WARN [BP-1163377979-172.17.0.2-1733966095287 heartbeating to localhost/127.0.0.1:33353 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1163377979-172.17.0.2-1733966095287 (Datanode Uuid dba9e474-68ec-4a87-934b-bada2110a343) service to localhost/127.0.0.1:33353 2024-12-12T01:27:30,201 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-12T01:27:30,201 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data1/current/BP-1163377979-172.17.0.2-1733966095287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T01:27:30,201 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/cluster_7215f513-b23d-db2d-82ca-bafa64b62c7e/data/data2/current/BP-1163377979-172.17.0.2-1733966095287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-12T01:27:30,202 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-12T01:27:30,208 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@71d736e3{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-12T01:27:30,209 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f00f70b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-12T01:27:30,209 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-12T01:27:30,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62b16b1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-12T01:27:30,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a1d5810{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/3f0739fe-ea26-73de-5b54-1ed21b5d781d/hadoop.log.dir/,STOPPED} 2024-12-12T01:27:30,221 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-12T01:27:30,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down